diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp index 2363ad810d..d378f57c1b 100644 --- a/Documentation/ABI/testing/sysfs-ptp +++ b/Documentation/ABI/testing/sysfs-ptp @@ -33,6 +33,13 @@ Description: frequency adjustment value (a positive integer) in parts per billion. +What: /sys/class/ptp/ptpN/max_vclocks +Date: May 2021 +Contact: Yangbo Lu +Description: + This file contains the maximum number of ptp vclocks. + Write an integer to reconfigure it. + What: /sys/class/ptp/ptpN/n_alarms Date: September 2010 Contact: Richard Cochran @@ -61,6 +68,19 @@ Description: This file contains the number of programmable pins offered by the PTP hardware clock. +What: /sys/class/ptp/ptpN/n_vclocks +Date: May 2021 +Contact: Yangbo Lu +Description: + This file contains the number of virtual PTP clocks in + use. By default, the value is 0 meaning that only the + physical clock is in use. Setting the value creates + the corresponding number of virtual clocks and causes + the physical clock to become free running. Setting the + value back to 0 deletes the virtual clocks and + switches the physical clock back to normal, adjustable + operation. + What: /sys/class/ptp/ptpN/pins Date: March 2014 Contact: Richard Cochran diff --git a/Documentation/dev-tools/kunit/running_tips.rst b/Documentation/dev-tools/kunit/running_tips.rst index 7d99386cf9..d1626d548f 100644 --- a/Documentation/dev-tools/kunit/running_tips.rst +++ b/Documentation/dev-tools/kunit/running_tips.rst @@ -86,19 +86,7 @@ Generating code coverage reports under UML .. note:: TODO(brendanhiggins@google.com): There are various issues with UML and versions of gcc 7 and up. You're likely to run into missing ``.gcda`` - files or compile errors. We know one `faulty GCC commit - `_ - but not how we'd go about getting this fixed. The compile errors still - need some investigation. - -.. note:: - TODO(brendanhiggins@google.com): for recent versions of Linux - (5.10-5.12, maybe earlier), there's a bug with gcov counters not being - flushed in UML. This translates to very low (<1%) reported coverage. This is - related to the above issue and can be worked around by replacing the - one call to ``uml_abort()`` (it's in ``os_dump_core()``) with a plain - ``exit()``. - + files or compile errors. This is different from the "normal" way of getting coverage information that is documented in Documentation/dev-tools/gcov.rst.
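The max_vclocks and n_vclocks attributes documented above are ordinary sysfs files, so the new vclock interface can be driven from any language. A minimal C sketch, assuming a kernel with this patch applied and a PHC at ptp0 (the paths and the values "4"/"2" are illustrative only):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short string to a sysfs attribute; returns 0 on success. */
    static int sysfs_write(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);
        int ret = 0;

        if (fd < 0)
            return -1;
        if (write(fd, val, strlen(val)) != (ssize_t)strlen(val))
            ret = -1;
        close(fd);
        return ret;
    }

    int main(void)
    {
        /* Make sure the ceiling allows it, then create two virtual clocks;
         * per the ABI text above, ptp0 then becomes free running. */
        if (sysfs_write("/sys/class/ptp/ptp0/max_vclocks", "4") ||
            sysfs_write("/sys/class/ptp/ptp0/n_vclocks", "2"))
            perror("ptp vclocks");
        return 0;
    }

Writing 0 back to n_vclocks deletes the virtual clocks again and returns the physical clock to normal, adjustable operation.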
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml index 8dc7b404ee..1174c9aa99 100644 --- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml +++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml @@ -50,7 +50,6 @@ properties: reg: minItems: 1 - maxItems: 3 items: - description: base register - description: power register diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml index 5f4345d430..e3ca5389c1 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.yaml +++ b/Documentation/devicetree/bindings/display/renesas,du.yaml @@ -92,7 +92,6 @@ required: - reg - clocks - interrupts - - resets - ports allOf: diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml index ad0ec9f35b..7d9c083632 100644 --- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml +++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml @@ -39,17 +39,7 @@ properties: reg: maxItems: 1 -patternProperties: - "^adi,bypass-attenuator-in[0-4]$": - description: | - Configures bypassing the individual voltage input attenuator. If - set to 1 the attenuator is bypassed if set to 0 the attenuator is - not bypassed. If the property is absent then the attenuator - retains it's configuration from the bios/bootloader. - $ref: /schemas/types.yaml#/definitions/uint32 - enum: [0, 1] - - "^adi,pwm-active-state$": + adi,pwm-active-state: description: | Integer array, represents the active state of the pwm outputs If set to 0 the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm @@ -61,6 +51,16 @@ patternProperties: enum: [0, 1] default: 1 +patternProperties: + "^adi,bypass-attenuator-in[0-4]$": + description: | + Configures bypassing the individual voltage input attenuator. If + set to 1 the attenuator is bypassed if set to 0 the attenuator is + not bypassed. If the property is absent then the attenuator + retains its configuration from the bios/bootloader. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1] + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index 1181b590db..03f2b2d4db 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -52,16 +52,14 @@ properties: items: - const: marvell,ap806-smmu-500 - const: arm,mmu-500 - - description: NVIDIA SoCs that program two ARM MMU-500s identically - items: - description: NVIDIA SoCs that require memory controller interaction and may program multiple ARM MMU-500s identically with the memory controller interleaving translations between multiple instances for improved performance.
items: - enum: - - const: nvidia,tegra194-smmu - - const: nvidia,tegra186-smmu + - nvidia,tegra194-smmu + - nvidia,tegra186-smmu - const: nvidia,smmu-500 - items: - const: arm,mmu-500 diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml index d2e28a9e35..ba9124f721 100644 --- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml +++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml @@ -28,14 +28,12 @@ properties: - description: configuration registers for MMU instance 0 - description: configuration registers for MMU instance 1 minItems: 1 - maxItems: 2 interrupts: items: - description: interruption for MMU instance 0 - description: interruption for MMU instance 1 minItems: 1 - maxItems: 2 clocks: items: diff --git a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml index 7a63c85ef8..01c9acf927 100644 --- a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml @@ -57,7 +57,6 @@ properties: ranges: minItems: 1 - maxItems: 3 description: | Memory bus areas for interacting with the devices. Reflects the memory layout with four integer values following: diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml index e5f1a33332..dd5a64969e 100644 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml @@ -84,7 +84,6 @@ properties: interrupts: minItems: 1 - maxItems: 3 items: - description: NAND CTLRDY interrupt - description: FLASH_DMA_DONE if flash DMA is available @@ -92,7 +91,6 @@ properties: interrupt-names: minItems: 1 - maxItems: 3 items: - const: nand_ctlrdy - const: flash_dma_done @@ -148,8 +146,6 @@ allOf: then: properties: reg-names: - minItems: 2 - maxItems: 2 items: - const: nand - const: nand-int-base @@ -161,8 +157,6 @@ allOf: then: properties: reg-names: - minItems: 3 - maxItems: 3 items: - const: nand - const: nand-int-base @@ -175,8 +169,6 @@ allOf: then: properties: reg-names: - minItems: 3 - maxItems: 3 items: - const: nand - const: iproc-idm diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml index 0b8a05dd52..f978f8719d 100644 --- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml +++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml @@ -67,8 +67,8 @@ properties: reg: oneOf: - enum: - - 0 - - 1 + - 0 + - 1 required: - compatible diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt index f7da3d73ca..32821066a8 100644 --- a/Documentation/devicetree/bindings/net/gpmc-eth.txt +++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt @@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt For the properties relevant to the ethernet controller connected to the GPMC refer to the binding documentation of the device. 
For example, the documentation -for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt +for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml Child nodes need to specify the GPMC bus address width using the "bank-width" property, but it is possible that an ethernet controller also has a property to diff --git a/Documentation/devicetree/bindings/net/smsc,lan9115.yaml b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml new file mode 100644 index 0000000000..f86667cbcc --- /dev/null +++ b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller + +maintainers: + - Shawn Guo + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + oneOf: + - const: smsc,lan9115 + - items: + - enum: + - smsc,lan89218 + - smsc,lan9117 + - smsc,lan9118 + - smsc,lan9220 + - smsc,lan9221 + - const: smsc,lan9115 + + reg: + maxItems: 1 + + reg-shift: true + + reg-io-width: + enum: [ 2, 4 ] + default: 2 + + interrupts: + minItems: 1 + items: + - description: + LAN interrupt line + - description: + Optional PME (power management event) interrupt that is able to wake + up the host system with a 50ms pulse on network activity + + clocks: + maxItems: 1 + + phy-mode: true + + smsc,irq-active-high: + type: boolean + description: Indicates the IRQ polarity is active-high + + smsc,irq-push-pull: + type: boolean + description: Indicates the IRQ type is push-pull + + smsc,force-internal-phy: + type: boolean + description: Forces SMSC LAN controller to use internal PHY + + smsc,force-external-phy: + type: boolean + description: Forces SMSC LAN controller to use external PHY + + smsc,save-mac-address: + type: boolean + description: + Indicates that MAC address needs to be saved before resetting the + controller + + reset-gpios: + maxItems: 1 + description: + A GPIO line connected to the RESET (active low) signal of the device. + On many systems this is wired high so the device goes out of reset at + power-on, but if it is under program control, this optional GPIO can + wake up in response to it. + + vdd33a-supply: + description: 3.3V analog power supply + + vddvario-supply: + description: IO logic power supply + +required: + - compatible + - reg + - interrupts + +# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*", +# "gpmc,*", ...) to be found, that actually depend on the compatible value of +# the parent node. +additionalProperties: true + +examples: + - | + #include + + ethernet@f4000000 { + compatible = "smsc,lan9220", "smsc,lan9115"; + reg = <0xf4000000 0x2000000>; + phy-mode = "mii"; + interrupt-parent = <&gpio1>; + interrupts = <31>, <32>; + reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; + reg-io-width = <4>; + smsc,irq-push-pull; + }; diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml index 5272b6f284..dcd63908ae 100644 --- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml +++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml @@ -77,6 +77,34 @@ properties: Type-C spec states minimum CC pin debounce of 100 ms and maximum of 200 ms. However, some solutions might need more than 200 ms.
+ refclk-dig: + type: object + description: | + WIZ node should have subnode for refclk_dig to select the reference + clock source for the reference clock used in the PHY and PMA digital + logic. + properties: + clocks: + minItems: 2 + maxItems: 4 + description: Phandle to two (Torrent) or four (Sierra) clock nodes representing + the inputs to refclk_dig + + "#clock-cells": + const: 0 + + assigned-clocks: + maxItems: 1 + + assigned-clock-parents: + maxItems: 1 + + required: + - clocks + - "#clock-cells" + - assigned-clocks + - assigned-clock-parents + patternProperties: "^pll[0|1]-refclk$": type: object @@ -121,34 +149,6 @@ patternProperties: - clocks - "#clock-cells" - "^refclk-dig$": - type: object - description: | - WIZ node should have subnode for refclk_dig to select the reference - clock source for the reference clock used in the PHY and PMA digital - logic. - properties: - clocks: - minItems: 2 - maxItems: 4 - description: Phandle to two (Torrent) or four (Sierra) clock nodes representing - the inputs to refclk_dig - - "#clock-cells": - const: 0 - - assigned-clocks: - maxItems: 1 - - assigned-clock-parents: - maxItems: 1 - - required: - - clocks - - "#clock-cells" - - assigned-clocks - - assigned-clock-parents - "^serdes@[0-9a-f]+$": type: object description: | diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml index 12b8963615..c2e8c54e53 100644 --- a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml +++ b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml @@ -36,12 +36,12 @@ properties: switching frequency must be one of following corresponding value 1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz - patternProperties: - "^ldo[1-4]$": + ldortc: type: object $ref: regulator.yaml# - "^ldortc$": + patternProperties: + "^ldo[1-4]$": type: object $ref: regulator.yaml# diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml index 8761437ed8..aabf50f5b3 100644 --- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml @@ -83,7 +83,8 @@ properties: unevaluatedProperties: false - "^vsnvs$": + properties: + vsnvs: type: object $ref: regulator.yaml# description: diff --git a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml index 657c13b62b..056d42daae 100644 --- a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml +++ b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml @@ -30,7 +30,6 @@ properties: maxItems: 1 clocks: - minItems: 2 items: - description: PCLK clocks - description: EXTCLK clocks. Faraday calls it CLK1HZ and says the clock diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml index faef4f6f55..8246891602 100644 --- a/Documentation/devicetree/bindings/spi/spi-controller.yaml +++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml @@ -79,22 +79,7 @@ properties: description: The SPI controller acts as a slave, instead of a master. 
-allOf: - - if: - not: - required: - - spi-slave - then: - properties: - "#address-cells": - const: 1 - else: - properties: - "#address-cells": - const: 0 - -patternProperties: - "^slave$": + slave: type: object properties: @@ -105,6 +90,7 @@ patternProperties: required: - compatible +patternProperties: "^.*@[0-9a-f]+$": type: object @@ -180,6 +166,20 @@ patternProperties: - compatible - reg +allOf: + - if: + not: + required: + - spi-slave + then: + properties: + "#address-cells": + const: 1 + else: + properties: + "#address-cells": + const: 0 + additionalProperties: true examples: diff --git a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml index a88f99adfe..f238848ad0 100644 --- a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml +++ b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml @@ -25,14 +25,12 @@ properties: interrupts: minItems: 1 - maxItems: 2 items: - description: Host controller interrupt - description: Device controller interrupt in isp1761 interrupt-names: minItems: 1 - maxItems: 2 items: - const: host - const: peripheral diff --git a/Documentation/driver-api/early-userspace/early_userspace_support.rst b/Documentation/driver-api/early-userspace/early_userspace_support.rst index 8a58c61932..61bdeac1ba 100644 --- a/Documentation/driver-api/early-userspace/early_userspace_support.rst +++ b/Documentation/driver-api/early-userspace/early_userspace_support.rst @@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user. As a technical note, when directories and files are specified, the entire CONFIG_INITRAMFS_SOURCE is passed to -usr/gen_initramfs_list.sh. This means that CONFIG_INITRAMFS_SOURCE +usr/gen_initramfs.sh. This means that CONFIG_INITRAMFS_SOURCE can really be interpreted as any legal argument to -gen_initramfs_list.sh. If a directory is specified as an argument then +gen_initramfs.sh. If a directory is specified as an argument then the contents are scanned, uid/gid translation is performed, and usr/gen_init_cpio file directives are output. If a file is -specified as an argument to usr/gen_initramfs_list.sh then the +specified as an argument to usr/gen_initramfs.sh then the contents of the file are simply copied to the output. All of the output directives from directory scanning and file contents copying are processed by usr/gen_init_cpio. -See also 'usr/gen_initramfs_list.sh -h'. +See also 'usr/gen_initramfs.sh -h'. Where's this all leading?
========================= diff --git a/Documentation/features/core/thread-info-in-task/arch-support.txt b/Documentation/features/core/thread-info-in-task/arch-support.txt new file mode 100644 index 0000000000..9f0259bbd7 --- /dev/null +++ b/Documentation/features/core/thread-info-in-task/arch-support.txt @@ -0,0 +1,32 @@ +# +# Feature name: thread-info-in-task +# Kconfig: THREAD_INFO_IN_TASK +# description: arch makes use of the core kernel facility to embed thread_info in task_struct +# + ----------------------- + | arch |status| + ----------------------- + | alpha: | TODO | + | arc: | TODO | + | arm: | TODO | + | arm64: | ok | + | csky: | TODO | + | h8300: | TODO | + | hexagon: | TODO | + | ia64: | TODO | + | m68k: | TODO | + | microblaze: | TODO | + | mips: | TODO | + | nds32: | ok | + | nios2: | TODO | + | openrisc: | TODO | + | parisc: | TODO | + | powerpc: | ok | + | riscv: | ok | + | s390: | ok | + | sh: | TODO | + | sparc: | TODO | + | um: | TODO | + | x86: | ok | + | xtensa: | TODO | + ----------------------- diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst index 4598b0d90b..1649606319 100644 --- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst +++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst @@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de The kernel does not depend on external cpio tools. If you specify a directory instead of a configuration file, the kernel's build infrastructure creates a configuration file from that directory (usr/Makefile calls -usr/gen_initramfs_list.sh), and proceeds to package up that directory +usr/gen_initramfs.sh), and proceeds to package up that directory using the config file (by feeding it to usr/gen_init_cpio, which is created from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is entirely self-contained, and the kernel's boot-time extractor is also diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 6ea91e4159..c86628e6a2 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -212,6 +212,7 @@ Userspace to kernel: ``ETHTOOL_MSG_FEC_SET`` set FEC settings ``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET`` get standard statistics + ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info ===================================== ================================ Kernel to userspace: @@ -250,6 +251,7 @@ Kernel to userspace: ``ETHTOOL_MSG_FEC_NTF`` FEC settings ``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics + ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info ======================================== ================================= ``GET`` requests are sent by userspace applications to retrieve device @@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example: etherStatsPkts512to1023Octets 512 1023 ============================= ==== ==== +PHC_VCLOCKS_GET +=============== + +Query device PHC virtual clocks information.
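The request and reply attribute tables follow just below; as a rough orientation, the reply reaches userspace as a flat run of netlink attributes. A minimal sketch of walking that payload by hand, assuming the uapi constants from <linux/ethtool_netlink.h> added alongside this interface (the genetlink socket setup and the ETHTOOL_A_PHC_VCLOCKS_HEADER request are omitted):

    #include <stdio.h>
    #include <linux/netlink.h>
    #include <linux/ethtool_netlink.h>

    /* Walk the attribute payload of an ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY.
     * 'payload' points just past the genetlink header, 'len' is its size. */
    static void dump_phc_vclocks(const void *payload, int len)
    {
        const struct nlattr *a = payload;

        while (len >= NLA_HDRLEN && a->nla_len >= NLA_HDRLEN &&
               a->nla_len <= len) {
            const void *data = (const char *)a + NLA_HDRLEN;
            int type = a->nla_type & NLA_TYPE_MASK;

            if (type == ETHTOOL_A_PHC_VCLOCKS_NUM) {
                printf("vclocks: %u\n", *(const __u32 *)data);
            } else if (type == ETHTOOL_A_PHC_VCLOCKS_INDEX) {
                /* The INDEX attribute carries an array of s32 PHC indices. */
                int i, n = (a->nla_len - NLA_HDRLEN) / sizeof(__s32);
                const __s32 *idx = data;

                for (i = 0; i < n; i++)
                    printf("  /dev/ptp%d\n", idx[i]);
            }
            len -= NLA_ALIGN(a->nla_len);
            a = (const struct nlattr *)((const char *)a + NLA_ALIGN(a->nla_len));
        }
    }

Each returned index names a PHC that can be opened as /dev/ptp<index> like any other clock.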
+ +Request contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested request header + ==================================== ====== ========================== + +Kernel response contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested reply header + ``ETHTOOL_A_PHC_VCLOCKS_NUM`` u32 PHC virtual clocks number + ``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array + ==================================== ====== ========================== + Request translation =================== @@ -1575,4 +1596,5 @@ are netlink only. n/a ``ETHTOOL_MSG_CABLE_TEST_ACT`` n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT`` n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET`` + n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` =================================== ===================================== diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 0467b30e4a..d31ed6c1cb 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -110,6 +110,12 @@ nf_conntrack_tcp_be_liberal - BOOLEAN Be conservative in what you do, be liberal in what you accept from others. If it's non-zero, we mark only out of window RST segments as INVALID. +nf_conntrack_tcp_ignore_invalid_rst - BOOLEAN + - 0 - disabled (default) + - 1 - enabled + + If it's 1, we don't mark out of window RST segments as INVALID. + nf_conntrack_tcp_loose - BOOLEAN - 0 - disabled - not 0 - enabled (default) diff --git a/Documentation/networking/tipc.rst b/Documentation/networking/tipc.rst index 76775f24cd..ab63d298cc 100644 --- a/Documentation/networking/tipc.rst +++ b/Documentation/networking/tipc.rst @@ -4,10 +4,125 @@ Linux Kernel TIPC ================= -TIPC (Transparent Inter Process Communication) is a protocol that is -specially designed for intra-cluster communication. +Introduction +============ -For more information about TIPC, see http://tipc.sourceforge.net. +TIPC (Transparent Inter Process Communication) is a protocol that is specially +designed for intra-cluster communication. It can be configured to transmit +messages either on UDP or directly across Ethernet. Message delivery is +sequence guaranteed, loss free and flow controlled. Latency times are shorter +than with any other known protocol, while maximal throughput is comparable to +that of TCP. + +TIPC Features +------------- + +- Cluster wide IPC service + + Have you ever wished you had the convenience of Unix Domain Sockets even when + transmitting data between cluster nodes? Where you yourself determine the + addresses you want to bind to and use? Where you don't have to perform DNS + lookups and worry about IP addresses? Where you don't have to start timers + to monitor the continuous existence of peer sockets? And yet without the + downsides of that socket type, such as the risk of lingering inodes? + + Welcome to the Transparent Inter Process Communication service, TIPC in short, + which gives you all of this, and a lot more. + +- Service Addressing + + A fundamental concept in TIPC is that of Service Addressing which makes it + possible for a programmer to choose their own address, bind it to a server + socket and let client programs use only that address for sending messages.
+ +- Service Tracking + + A client wanting to wait for the availability of a server uses the Service + Tracking mechanism to subscribe for binding and unbinding/close events for + sockets with the associated service address. + + The service tracking mechanism can also be used for Cluster Topology Tracking, + i.e., subscribing for availability/non-availability of cluster nodes. + + Likewise, the service tracking mechanism can be used for Cluster Connectivity + Tracking, i.e., subscribing for up/down events for individual links between + cluster nodes. + +- Transmission Modes + + Using a service address, a client can send datagram messages to a server socket. + + Using the same address type, it can establish a connection towards an accepting + server socket. + + It can also use a service address to create and join a Communication Group, + which is the TIPC manifestation of a brokerless message bus. + + Multicast with very good performance and scalability is available both in + datagram mode and in communication group mode. + +- Inter Node Links + + Communication between any two nodes in a cluster is maintained by one or two + Inter Node Links, which both guarantee data traffic integrity and monitor + the peer node's availability. + +- Cluster Scalability + + By applying the Overlapping Ring Monitoring algorithm on the inter node links + it is possible to scale TIPC clusters up to 1000 nodes with a maintained + neighbor failure discovery time of 1-2 seconds. For smaller clusters this + time can be made much shorter. + +- Neighbor Discovery + + Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP + multicast, when any of those services are available. If not, configured peer + IP addresses can be used. + +- Configuration + + When running TIPC in single node mode no configuration whatsoever is needed. + When running in cluster mode TIPC must as a minimum be given a node address + (before Linux 4.17) and told which interface to attach to. The "tipc" + configuration tool makes it possible to add and maintain many more + configuration parameters. + +- Performance + + TIPC message transfer latency times are better than in any other known protocol. + Maximal byte throughput for inter-node connections is still somewhat lower than + for TCP, while it is superior for intra-node and inter-container throughput + on the same host. + +- Language Support + + The TIPC user API has support for C, Python, Perl, Ruby, D and Go. + +More Information +---------------- + +- How to set up TIPC: + + http://tipc.io/getting_started.html + +- How to program with TIPC: + + http://tipc.io/programming.html + +- How to contribute to TIPC: + + http://tipc.io/contacts.html + +- More details about TIPC specification: + + http://tipc.io/protocol.html + + +Implementation +============== + +TIPC is implemented as a kernel module in net/tipc/ directory.
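To make the Service Addressing and Transmission Modes descriptions above concrete, here is a minimal sketch of a server binding a TIPC service address; it needs a TIPC-enabled kernel and <linux/tipc.h>, and the service type 18888 / instance 17 are arbitrary example values:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/tipc.h>

    int main(void)
    {
        struct sockaddr_tipc srv = {
            .family = AF_TIPC,
            .addrtype = TIPC_SERVICE_ADDR,
            .scope = TIPC_CLUSTER_SCOPE,
            .addr.name.name = { .type = 18888, .instance = 17 },
        };
        int sd = socket(AF_TIPC, SOCK_RDM, 0);

        if (sd < 0 || bind(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0) {
            perror("tipc");
            return 1;
        }
        /* A client on any cluster node reaches this socket by calling
         * sendto() with the same {type, instance} service address; no IP
         * addresses or DNS lookups are involved. */
        close(sd);
        return 0;
    }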
TIPC Base Types --------------- diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst index 229629e305..4a6ed02194 100644 --- a/Documentation/translations/zh_CN/process/2.Process.rst +++ b/Documentation/translations/zh_CN/process/2.Process.rst @@ -47,7 +47,7 @@ (顺便说一句,值得注意的是,合并窗口期间集成的更改并不是凭空产生的;它们是经 提前收集、测试和分级的。稍后将详细描述该过程的工作方式。) -合并窗口持续大约两周。在这段时间结束时,LinusTorvalds将声明窗口已关闭,并 +合并窗口持续大约两周。在这段时间结束时,Linus Torvalds将声明窗口已关闭,并 释放第一个“rc”内核。例如,对于目标为5.6的内核,在合并窗口结束时发生的释放 将被称为5.6-rc1。-rc1 版本是一个信号,表示合并新特性的时间已经过去,稳定下一 个内核的时间已经到来。 @@ -168,7 +168,7 @@ Greg Kroah-Hartman领导。稳定团队将使用5.x.y编号方案不定期地发 补丁如何进入内核 ---------------- -只有一个人可以将补丁合并到主线内核存储库中:LinusTorvalds。但是,在进入 +只有一个人可以将补丁合并到主线内核存储库中:Linus Torvalds。但是,在进入 2.6.38内核的9500多个补丁中,只有112个(大约1.3%)是由Linus自己直接选择的。 内核项目已经发展到一个没有一个开发人员可以在没有支持的情况下检查和选择每个 补丁的规模。内核开发人员处理这种增长的方式是使用围绕信任链构建的助理系统。 diff --git a/LICENSES/dual/CC-BY-4.0 b/LICENSES/dual/CC-BY-4.0 index 45a81b8e46..869cad3d16 100644 --- a/LICENSES/dual/CC-BY-4.0 +++ b/LICENSES/dual/CC-BY-4.0 @@ -392,7 +392,7 @@ Section 8 -- Interpretation. Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances -will be considered the “Licensor.” The text of the Creative Commons +will be considered the "Licensor." The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as diff --git a/Makefile b/Makefile index 069607cfe2..e4f5895bad 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 -PATCHLEVEL = 13 -SUBLEVEL = 1 -EXTRAVERSION = +PATCHLEVEL = 14 +SUBLEVEL = 0 +EXTRAVERSION = -rc2 NAME = Opossums on Parade # *DOCUMENTATION* @@ -129,6 +129,11 @@ endif $(if $(word 2, $(KBUILD_EXTMOD)), \ $(error building multiple external modules is not supported)) +# Remove trailing slashes +ifneq ($(filter %/, $(KBUILD_EXTMOD)),) +KBUILD_EXTMOD := $(shell dirname $(KBUILD_EXTMOD).) +endif + export KBUILD_EXTMOD # Kbuild will save output files in the current working directory. @@ -544,14 +549,21 @@ scripts_basic: $(Q)rm -f .tmp_quiet_recordmcount PHONY += outputmakefile +ifdef building_out_of_srctree # Before starting out-of-tree build, make sure the source tree is clean. # outputmakefile generates a Makefile in the output directory, if using a # separate output directory. This allows convenient use of make in the # output directory. 
# At the same time when output Makefile generated, generate .gitignore to # ignore whole output directory + +quiet_cmd_makefile = GEN Makefile + cmd_makefile = { \ + echo "\# Automatically generated by $(srctree)/Makefile: don't edit"; \ + echo "include $(srctree)/Makefile"; \ + } > Makefile + outputmakefile: -ifdef building_out_of_srctree $(Q)if [ -f $(srctree)/.config -o \ -d $(srctree)/include/config -o \ -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \ @@ -562,7 +574,7 @@ ifdef building_out_of_srctree false; \ fi $(Q)ln -fsn $(srctree) source - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) + $(call cmd,makefile) $(Q)test -e .gitignore || \ { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore endif @@ -658,7 +670,7 @@ endif ifeq ($(KBUILD_EXTMOD),) # Objects we will link into vmlinux / subdirs we need to visit -core-y := init/ usr/ +core-y := init/ usr/ arch/$(SRCARCH)/ drivers-y := drivers/ sound/ drivers-$(CONFIG_SAMPLES) += samples/ drivers-$(CONFIG_NET) += net/ @@ -716,11 +728,12 @@ $(KCONFIG_CONFIG): # This exploits the 'multi-target pattern rule' trick. # The syncconfig should be executed only once to make all the targets. # (Note: use the grouped target '&:' when we bump to GNU Make 4.3) -quiet_cmd_syncconfig = SYNC $@ - cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig - +# +# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig, +# so you cannot notice that Kconfig is waiting for the user input. %/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG) - +$(call cmd,syncconfig) + $(Q)$(kecho) " SYNC $@" + $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig else # !may-sync-config # External modules and some install targets need include/generated/autoconf.h # and include/config/auto.conf but do not care if they are up-to-date. @@ -790,7 +803,7 @@ else # Warn about unmarked fall-throughs in switch statement. # Disabled for clang while comment to attribute conversion happens and # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed. -KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,) +KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,) endif # These warnings generated too much noise in a regular build. 
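The -Wimplicit-fallthrough=5 change just above pins GCC at the strictest level of the warning, where only the fallthrough attribute is honored and fall-through comments are ignored. A standalone illustration (not from the patch), with the kernel's fallthrough macro from <linux/compiler_attributes.h> re-defined locally so it builds in userspace:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's fallthrough macro. */
    #define fallthrough __attribute__((__fallthrough__))

    static int flags_for(char c)
    {
        int n = 0;

        switch (c) {
        case 'a':
            n |= 1;
            fallthrough;    /* silences -Wimplicit-fallthrough=5 */
        case 'b':           /* a bare comment here would not, at level 5 */
            n |= 2;
            break;
        default:
            break;
        }
        return n;
    }

    int main(void)
    {
        printf("%d\n", flags_for('a')); /* prints 3 */
        return 0;
    }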
@@ -961,8 +974,8 @@ KBUILD_CFLAGS += $(CC_FLAGS_CFI) export CC_FLAGS_CFI endif -ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B -KBUILD_CFLAGS += -falign-functions=32 +ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B +KBUILD_CFLAGS += -falign-functions=64 endif # arch Makefile may override CC so keep this after arch Makefile is included @@ -1039,7 +1052,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,) endif ifeq ($(CONFIG_RELR),y) -LDFLAGS_vmlinux += --pack-dyn-relocs=relr +LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags endif # We never want expected sections to be placed heuristically by the @@ -1089,41 +1102,6 @@ export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE) MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) export MODLIB -HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) - -has_libelf = $(call try-run,\ - echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) - -ifdef CONFIG_STACK_VALIDATION - ifeq ($(has_libelf),1) - objtool_target := tools/objtool FORCE - else - SKIP_STACK_VALIDATION := 1 - export SKIP_STACK_VALIDATION - endif -endif - -PHONY += resolve_btfids_clean - -resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids - -# tools/bpf/resolve_btfids directory might not exist -# in output directory, skip its clean in that case -resolve_btfids_clean: -ifneq ($(wildcard $(resolve_btfids_O)),) - $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean -endif - -ifdef CONFIG_BPF -ifdef CONFIG_DEBUG_INFO_BTF - ifeq ($(has_libelf),1) - resolve_btfids_target := tools/bpf/resolve_btfids FORCE - else - ERROR_RESOLVE_BTFIDS := 1 - endif -endif # CONFIG_DEBUG_INFO_BTF -endif # CONFIG_BPF - PHONY += prepare0 export extmod_prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/) @@ -1235,7 +1213,7 @@ prepare0: archprepare $(Q)$(MAKE) $(build)=. # All the preparing.. 
-prepare: prepare0 prepare-objtool prepare-resolve_btfids +prepare: prepare0 PHONY += remove-stale-files remove-stale-files: @@ -1252,26 +1230,6 @@ uapi-asm-generic: $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \ generic=include/uapi/asm-generic -PHONY += prepare-objtool prepare-resolve_btfids -prepare-objtool: $(objtool_target) -ifeq ($(SKIP_STACK_VALIDATION),1) -ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL - @echo "error: Cannot generate __mcount_loc for CONFIG_DYNAMIC_FTRACE=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 - @false -endif -ifdef CONFIG_UNWINDER_ORC - @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 - @false -else - @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 -endif -endif - -prepare-resolve_btfids: $(resolve_btfids_target) -ifeq ($(ERROR_RESOLVE_BTFIDS),1) - @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 - @false -endif # Generate some files # --------------------------------------------------------------------------- @@ -1359,6 +1317,43 @@ PHONY += scripts_unifdef scripts_unifdef: scripts_basic $(Q)$(MAKE) $(build)=scripts scripts/unifdef +# --------------------------------------------------------------------------- +# Tools + +ifdef CONFIG_STACK_VALIDATION +prepare: tools/objtool +endif + +ifdef CONFIG_BPF +ifdef CONFIG_DEBUG_INFO_BTF +prepare: tools/bpf/resolve_btfids +endif +endif + +PHONY += resolve_btfids_clean + +resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids + +# tools/bpf/resolve_btfids directory might not exist +# in output directory, skip its clean in that case +resolve_btfids_clean: +ifneq ($(wildcard $(resolve_btfids_O)),) + $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean +endif + +# Clear a bunch of variables before executing the submake +ifeq ($(quiet),silent_) +tools_silent=s +endif + +tools/: FORCE + $(Q)mkdir -p $(objtree)/tools + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ + +tools/%: FORCE + $(Q)mkdir -p $(objtree)/tools + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* + # --------------------------------------------------------------------------- # Kernel selftest @@ -1959,20 +1954,6 @@ kernelversion: image_name: @echo $(KBUILD_IMAGE) -# Clear a bunch of variables before executing the submake - -ifeq ($(quiet),silent_) -tools_silent=s -endif - -tools/: FORCE - $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ - -tools/%: FORCE - $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* - quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))) cmd_rmfiles = rm -rf $(rm-files) diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts index 33e413ca07..9b4cf5ebe6 100644 --- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts +++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts @@ -4,6 +4,7 @@ #include 
"aspeed-g5.dtsi" #include #include +#include /{ model = "ASRock E3C246D4I BMC"; @@ -73,7 +74,8 @@ &uart5 { &vuart { status = "okay"; - aspeed,sirq-active-high; + aspeed,lpc-io-reg = <0x2f8>; + aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; }; &mac0 { diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts index d26a9e16ff..aa24cac8e5 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts @@ -406,15 +406,15 @@ power-supply@69 { reg = <0x69>; }; - power-supply@6a { - compatible = "ibm,cffps"; - reg = <0x6a>; - }; - power-supply@6b { compatible = "ibm,cffps"; reg = <0x6b>; }; + + power-supply@6d { + compatible = "ibm,cffps"; + reg = <0x6d>; + }; }; &i2c4 { @@ -2832,6 +2832,7 @@ &pinctrl_emmc_default { &emmc { status = "okay"; + clk-phase-mmc-hs200 = <180>, <180>; }; &fsim0 { diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts index 941c048947..481d0ee1f8 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts @@ -280,10 +280,7 @@ &gpio0 { /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","","","","","","", /*Y0-Y7*/ "","","","","","","","", - /*Z0-Z7*/ "","","","","","","","", - /*AA0-AA7*/ "","","","","","","","", - /*AB0-AB7*/ "","","","","","","","", - /*AC0-AC7*/ "","","","","","","",""; + /*Z0-Z7*/ "","","","","","","",""; pin_mclr_vpp { gpio-hog; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts index e863ec0889..e33153dcae 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts @@ -136,10 +136,7 @@ &gpio0 { /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","","","","","","", /*Y0-Y7*/ "","","","","","","","", - /*Z0-Z7*/ "","","","","","","","", - /*AA0-AA7*/ "","","","","","","","", - /*AB0-AB7*/ "","","","","","","","", - /*AC0-AC7*/ "","","","","","","",""; + /*Z0-Z7*/ "","","","","","","",""; }; &fmc { @@ -189,6 +186,7 @@ &emmc_controller { &emmc { status = "okay"; + clk-phase-mmc-hs200 = <36>, <270>; }; &fsim0 { diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts index dace8ffeb9..0a4ffd10c4 100644 --- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts +++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts @@ -581,7 +581,7 @@ external-bus@1a100000 { * EBI2. This has a 25MHz chrystal next to it, so no * clocking is needed. 
*/ - ethernet-ebi2@2,0 { + ethernet@2,0 { compatible = "smsc,lan9221", "smsc,lan9115"; reg = <2 0x0 0x100>; /* @@ -598,8 +598,6 @@ ethernet-ebi2@2,0 { phy-mode = "mii"; reg-io-width = <2>; smsc,force-external-phy; - /* IRQ on edge falling = active low */ - smsc,irq-active-low; smsc,irq-push-pull; /* diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 37bd41ff8d..151c022004 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -195,16 +195,15 @@ amba { #size-cells = <1>; ranges; - vic: intc@10140000 { + vic: interrupt-controller@10140000 { compatible = "arm,versatile-vic"; interrupt-controller; #interrupt-cells = <1>; reg = <0x10140000 0x1000>; - clear-mask = <0xffffffff>; valid-mask = <0xffffffff>; }; - sic: intc@10003000 { + sic: interrupt-controller@10003000 { compatible = "arm,versatile-sic"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 06a0fdf240..e7e751a858 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -7,7 +7,7 @@ / { amba { /* The Versatile PB is using more SIC IRQ lines than the AB */ - sic: intc@10003000 { + sic: interrupt-controller@10003000 { clear-mask = <0xffffffff>; /* * Valid interrupt lines mask according to diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig index b06e537d51..4dfe321a79 100644 --- a/arch/arm/configs/integrator_defconfig +++ b/arch/arm/configs/integrator_defconfig @@ -57,10 +57,7 @@ CONFIG_DRM=y CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_MATROX=y -CONFIG_FB_MATROX_MILLENIUM=y -CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_VGA_CONSOLE is not set CONFIG_LOGO=y diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig index 483c400dd3..4c01e31309 100644 --- a/arch/arm/configs/realview_defconfig +++ b/arch/arm/configs/realview_defconfig @@ -64,11 +64,9 @@ CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y # CONFIG_SND_DRIVERS is not set diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 66c8b0980a..d9a27e4e09 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -135,6 +135,7 @@ CONFIG_DRM_SII902X=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_I2C_ADV7511=y CONFIG_DRM_I2C_ADV7511_AUDIO=y +CONFIG_FB=y CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_BACKLIGHT_PWM=y CONFIG_BACKLIGHT_AS3711=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index dbb1ef6017..3b30913d7d 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -61,6 +61,10 @@ CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=y CONFIG_TOUCHSCREEN_BU21013=y CONFIG_TOUCHSCREEN_CY8CTMA140=y +CONFIG_TOUCHSCREEN_CYTTSP_CORE=y +CONFIG_TOUCHSCREEN_CYTTSP_SPI=y +CONFIG_TOUCHSCREEN_MMS114=y +CONFIG_TOUCHSCREEN_ZINITIX=y CONFIG_INPUT_MISC=y CONFIG_INPUT_AB8500_PONKEY=y CONFIG_INPUT_GPIO_VIBRA=y @@ -100,6 +104,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y CONFIG_DRM_PANEL_SONY_ACX424AKP=y CONFIG_DRM_LIMA=y CONFIG_DRM_MCDE=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y 
CONFIG_BACKLIGHT_KTD253=y CONFIG_BACKLIGHT_GPIO=y diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index e7ecfb365e..b703f47570 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -60,7 +60,7 @@ CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LOGO=y CONFIG_SOUND=y @@ -88,8 +88,6 @@ CONFIG_NFSD=y CONFIG_NFSD_V3=y CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ISO8859_1=m -CONFIG_FONTS=y -CONFIG_FONT_ACORN_8x8=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index 4479369540..b5e246dd23 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig @@ -11,9 +11,6 @@ CONFIG_CPUSETS=y # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_VEXPRESS_DCSCB=y CONFIG_ARCH_VEXPRESS_TC2_PM=y @@ -23,14 +20,17 @@ CONFIG_MCPM=y CONFIG_VMSPLIT_2G=y CONFIG_NR_CPUS=8 CONFIG_ARM_PSCI=y -CONFIG_CMA=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="console=ttyAMA0" CONFIG_CPU_IDLE=y CONFIG_VFP=y CONFIG_NEON=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -43,7 +43,6 @@ CONFIG_IP_PNP_BOOTP=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y CONFIG_DEVTMPFS=y -CONFIG_DMA_CMA=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -59,7 +58,6 @@ CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_VIRTIO=y CONFIG_ATA=y -# CONFIG_SATA_PMP is not set CONFIG_NETDEVICES=y CONFIG_VIRTIO_NET=y CONFIG_SMC91X=y @@ -81,11 +79,9 @@ CONFIG_DRM=y CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_SII902X=y CONFIG_DRM_PL111=y -CONFIG_FB_MODE_HELPERS=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y # CONFIG_SND_DRIVERS is not set @@ -136,10 +132,11 @@ CONFIG_ROOT_NFS=y CONFIG_9P_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y +# CONFIG_CRYPTO_HW is not set +CONFIG_DMA_CMA=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_DEBUG_USER=y -# CONFIG_CRYPTO_HW is not set diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index e07e7de9ac..b5b13a9325 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1605,7 +1605,8 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - depends on !(CC_IS_CLANG && GCOV_KERNEL) + # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 + depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Build the kernel with Branch Target Identification annotations diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index b7d5328413..076d5efc4c 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -948,6 +948,10 @@ usb@3550000 { <&bpmp TEGRA194_CLK_XUSB_SS>, <&bpmp TEGRA194_CLK_XUSB_FS>; clock-names = "dev", "ss", 
"ss_src", "fs_src"; + interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVR &emc>, + <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVW &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_XUSB_DEV>; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBB>, <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>; power-domain-names = "dev", "ss"; @@ -977,6 +981,10 @@ usb@3610000 { "xusb_ss", "xusb_ss_src", "xusb_hs_src", "xusb_fs_src", "pll_u_480m", "clk_m", "pll_e"; + interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTR &emc>, + <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTW &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_XUSB_HOST>; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBC>, <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>; @@ -2469,6 +2477,11 @@ sound { * for 8x and 11.025x sample rate streams. */ assigned-clock-rates = <258000000>; + + interconnects = <&mc TEGRA194_MEMORY_CLIENT_APEDMAR &emc>, + <&mc TEGRA194_MEMORY_CLIENT_APEDMAW &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu TEGRA194_SID_APE>; }; tcu: tcu { diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index 734c8adece..01482d2275 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -82,10 +82,10 @@ scif0: serial@1004b800 { ; interrupt-names = "eri", "rxi", "txi", "bri", "dri", "tei"; - clocks = <&cpg CPG_MOD R9A07G044_CLK_SCIF0>; + clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>; clock-names = "fck"; power-domains = <&cpg>; - resets = <&cpg R9A07G044_CLK_SCIF0>; + resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>; status = "disabled"; }; diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index a9c0716e74..a074459f8f 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -47,7 +47,7 @@ * cache before the transfer is done, causing old data to be seen by * the CPU. */ -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#define ARCH_DMA_MINALIGN (128) #ifdef CONFIG_KASAN_SW_TAGS #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index 99ad77df8f..97ddc6c203 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -10,6 +10,7 @@ #include +#include #include struct mpidr_hash { diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index cce308586f..3f1490bfb9 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector # It's not safe to invoke KCOV when portions of the kernel environment aren't # available or are out-of-sync with HW state. Since `noinstr` doesn't always # inhibit KCOV instrumentation, disable it for the entire compilation unit. -KCOV_INSTRUMENT_entry.o := n +KCOV_INSTRUMENT_entry-common.o := n KCOV_INSTRUMENT_idle.o := n # Object file lists. 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 125d5c9471..0ead8bfedf 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 12ce14a98b..db8b2e2d02 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -604,7 +604,7 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs) __el0_fiq_handler_common(regs); } -static void __el0_error_handler_common(struct pt_regs *regs) +static void noinstr __el0_error_handler_common(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 69b3fde875..36f51b0e43 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -193,18 +193,6 @@ void mte_check_tfsr_el1(void) } #endif -static void update_gcr_el1_excl(u64 excl) -{ - - /* - * Note that the mask controlled by the user via prctl() is an - * include while GCR_EL1 accepts an exclude mask. - * No need for ISB since this only affects EL0 currently, implicit - * with ERET. - */ - sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl); -} - static void set_gcr_el1_excl(u64 excl) { current->thread.gcr_user_excl = excl; @@ -265,7 +253,8 @@ void mte_suspend_exit(void) if (!system_supports_mte()) return; - update_gcr_el1_excl(gcr_kernel_excl); + sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl); + isb(); } long set_mte_ctrl(struct task_struct *task, unsigned long arg) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 95cd62d673..2cf999e41d 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -29,7 +29,7 @@ .endm .macro ldrh1 reg, ptr, val - user_ldst 9998f, ldtrh, \reg, \ptr, \val + user_ldst 9997f, ldtrh, \reg, \ptr, \val .endm .macro strh1 reg, ptr, val @@ -37,7 +37,7 @@ .endm .macro ldr1 reg, ptr, val - user_ldst 9998f, ldtr, \reg, \ptr, \val + user_ldst 9997f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val @@ -45,7 +45,7 @@ .endm .macro ldp1 reg1, reg2, ptr, val - user_ldp 9998f, \reg1, \reg2, \ptr, \val + user_ldp 9997f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val @@ -53,8 +53,10 @@ .endm end .req x5 +srcin .req x15 SYM_FUNC_START(__arch_copy_from_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 // Nothing to copy ret @@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) + strb tmp1w, [dst], #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 1f61cd0df0..dbea3799c3 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -30,33 +30,34 @@ .endm .macro ldrh1 reg, ptr, val - user_ldst 9998f, ldtrh, \reg, \ptr, \val + user_ldst 9997f, ldtrh, \reg, \ptr, \val .endm .macro strh1 reg, ptr, val - user_ldst 9998f, sttrh, \reg, \ptr, \val + user_ldst 9997f, sttrh, \reg, \ptr, \val .endm .macro ldr1 reg, ptr, val - user_ldst 9998f, ldtr, \reg, \ptr, \val + user_ldst 9997f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val - user_ldst 9998f, sttr, \reg, \ptr, \val + user_ldst 9997f, sttr, \reg, \ptr, \val .endm .macro ldp1 reg1, 
reg2, ptr, val - user_ldp 9998f, \reg1, \reg2, \ptr, \val + user_ldp 9997f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val - user_stp 9998f, \reg1, \reg2, \ptr, \val + user_stp 9997f, \reg1, \reg2, \ptr, \val .endm end .req x5 - +srcin .req x15 SYM_FUNC_START(__arch_copy_in_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 ret @@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 043da90f5d..9f380eecf6 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -32,7 +32,7 @@ .endm .macro strh1 reg, ptr, val - user_ldst 9998f, sttrh, \reg, \ptr, \val + user_ldst 9997f, sttrh, \reg, \ptr, \val .endm .macro ldr1 reg, ptr, val @@ -40,7 +40,7 @@ .endm .macro str1 reg, ptr, val - user_ldst 9998f, sttr, \reg, \ptr, \val + user_ldst 9997f, sttr, \reg, \ptr, \val .endm .macro ldp1 reg1, reg2, ptr, val @@ -48,12 +48,14 @@ .endm .macro stp1 reg1, reg2, ptr, val - user_stp 9998f, \reg1, \reg2, \ptr, \val + user_stp 9997f, \reg1, \reg2, \ptr, \val .endm end .req x5 +srcin .req x15 SYM_FUNC_START(__arch_copy_to_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 ret @@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder + ldrb tmp1w, [srcin] +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 35fbdb7d6e..1648790e91 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -8,6 +8,7 @@ #include #include +#include /* Assumptions: * @@ -42,7 +43,16 @@ #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 +/* + * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE + * (16-byte) granularity, and we must ensure that no access straddles this + * alignment boundary. + */ +#ifdef CONFIG_KASAN_HW_TAGS +#define MIN_PAGE_SIZE MTE_GRANULE_SIZE +#else #define MIN_PAGE_SIZE 4096 +#endif /* Since strings are short on average, we check the first 16 bytes of the string for a NUL character. 
In order to do an unaligned ldp diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 08f9dd6903..86310d6e10 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -76,7 +76,7 @@ static inline int __enable_fpu(enum fpu_mode mode) /* we only have a 32-bit FPU */ return SIGFPE; #endif - fallthrough; + /* fallthrough */ case FPU_32BIT: if (cpu_has_fre) { /* clear FRE */ diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index cd4afcdf37..9adad24c2e 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1383,6 +1383,7 @@ static void build_r4000_tlb_refill_handler(void) switch (boot_cpu_type()) { default: if (sizeof(long) == 4) { + fallthrough; case CPU_LOONGSON2EF: /* Loongson2 ebase is different than r4k, we have more space */ if ((p - tlb_handler) > 64) @@ -2169,6 +2170,7 @@ static void build_r4000_tlb_load_handler(void) default: if (cpu_has_mips_r2_exec_hazard) { uasm_i_ehb(&p); + fallthrough; case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index bdfea6d6ab..3256a316e8 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -146,6 +146,7 @@ static inline void psurge_clr_ipi(int cpu) switch(psurge_type) { case PSURGE_DUAL: out_8(psurge_sec_intr, ~0); + break; case PSURGE_NONE: break; default: diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index bbf8622bbf..bd3ef121c3 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -126,6 +126,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, case DIE_SSTEP: if (uprobe_post_sstep_notifier(regs)) return NOTIFY_STOP; + break; default: break; } diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index b4da665bb8..739be5da3b 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -202,10 +202,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); /* - * Except for the MMU, which needs to be reset after any vendor - * specific adjustments to the reserved GPA bits. + * Except for the MMU, which needs to do its thing after any vendor + * specific adjustments to the reserved GPA bits. */ - kvm_mmu_reset_context(vcpu); + kvm_mmu_after_set_cpuid(vcpu); } static int is_efer_nx(void) @@ -765,7 +765,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS); edx.split.bit_width_fixed = cap.bit_width_fixed; - edx.split.anythread_deprecated = 1; + if (cap.version) + edx.split.anythread_deprecated = 1; edx.split.reserved1 = 0; edx.split.reserved2 = 0; @@ -940,8 +941,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); unsigned phys_as = entry->eax & 0xff; - if (!g_phys_as) + /* + * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as + * the guest operates in the same PA space as the host, i.e. + * reductions in MAXPHYADDR for memory encryption affect shadow + * paging, too. + * + * If TDP is enabled but an explicit guest MAXPHYADDR is not + * provided, use the raw bare metal MAXPHYADDR as reductions to + * the HPAs do not affect GPAs.
+ */ + if (!tdp_enabled) + g_phys_as = boot_cpu_data.x86_phys_bits; + else if (!g_phys_as) g_phys_as = phys_as; + entry->eax = g_phys_as | (virt_as << 8); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); @@ -964,12 +978,18 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) case 0x8000001a: case 0x8000001e: break; - /* Support memory encryption cpuid if host supports it */ case 0x8000001F: - if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) + if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; - else + } else { cpuid_entry_override(entry, CPUID_8000_001F_EAX); + + /* + * Enumerate '0' for "PA bits reduction", the adjusted + * MAXPHYADDR is enumerated directly (see 0x80000008). + */ + entry->ebx &= ~GENMASK(11, 6); + } break; /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 845d114ae0..66f7f5bc34 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -53,6 +53,8 @@ #include #include "trace.h" +#include "paging.h" + extern bool itlb_multihit_kvm_mitigation; int __read_mostly nx_huge_pages = -1; diff --git a/arch/x86/kvm/mmu/paging.h b/arch/x86/kvm/mmu/paging.h new file mode 100644 index 0000000000..de8ab323bb --- /dev/null +++ b/arch/x86/kvm/mmu/paging.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Shadow paging constants/helpers that don't need to be #undef'd. */ +#ifndef __KVM_X86_PAGING_H +#define __KVM_X86_PAGING_H + +#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) +#define PT64_LVL_ADDR_MASK(level) \ + (GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT64_LEVEL_BITS))) - 1)) +#define PT64_LVL_OFFSET_MASK(level) \ + (GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ + * PT64_LEVEL_BITS))) - 1)) +#endif /* __KVM_X86_PAGING_H */ + diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 490a028dda..ee044d357b 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -24,7 +24,7 @@ #define pt_element_t u64 #define guest_walker guest_walker64 #define FNAME(name) paging##64_##name - #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) #define PT_INDEX(addr, level) PT64_INDEX(addr, level) @@ -57,7 +57,7 @@ #define pt_element_t u64 #define guest_walker guest_walkerEPT #define FNAME(name) ept_##name - #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK + #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) #define PT_INDEX(addr, level) PT64_INDEX(addr, level) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 7a5ce93141..eb7b227fc6 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -38,12 +38,6 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0); #else #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) #endif -#define PT64_LVL_ADDR_MASK(level) \ - (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT64_LEVEL_BITS))) - 1)) -#define PT64_LVL_OFFSET_MASK(level) \ - (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ - * PT64_LEVEL_BITS))) - 1)) #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ | shadow_x_mask | 
shadow_nx_mask | shadow_me_mask) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 21d03e3a5d..3bd09c50c9 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -154,6 +154,10 @@ void recalc_intercepts(struct vcpu_svm *svm) for (i = 0; i < MAX_INTERCEPT; i++) c->intercepts[i] |= g->intercepts[i]; + + /* If SMI is not intercepted, ignore guest SMI intercept as well */ + if (!intercept_smi) + vmcb_clr_intercept(c, INTERCEPT_SMI); } static void copy_vmcb_control_area(struct vmcb_control_area *dst, @@ -304,8 +308,8 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, return true; } -static void nested_load_control_from_vmcb12(struct vcpu_svm *svm, - struct vmcb_control_area *control) +void nested_load_control_from_vmcb12(struct vcpu_svm *svm, + struct vmcb_control_area *control) { copy_vmcb_control_area(&svm->nested.ctl, control); @@ -618,6 +622,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) struct kvm_host_map map; u64 vmcb12_gpa; + if (!svm->nested.hsave_msr) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if (is_smm(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; @@ -692,6 +701,27 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) return ret; } +/* Copy state save area fields which are handled by VMRUN */ +void svm_copy_vmrun_state(struct vmcb_save_area *from_save, + struct vmcb_save_area *to_save) +{ + to_save->es = from_save->es; + to_save->cs = from_save->cs; + to_save->ss = from_save->ss; + to_save->ds = from_save->ds; + to_save->gdtr = from_save->gdtr; + to_save->idtr = from_save->idtr; + to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED; + to_save->efer = from_save->efer; + to_save->cr0 = from_save->cr0; + to_save->cr3 = from_save->cr3; + to_save->cr4 = from_save->cr4; + to_save->rax = from_save->rax; + to_save->rsp = from_save->rsp; + to_save->rip = from_save->rip; + to_save->cpl = 0; +} + void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) { to_vmcb->save.fs = from_vmcb->save.fs; @@ -1355,28 +1385,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; - svm->vmcb01.ptr->save.es = save->es; - svm->vmcb01.ptr->save.cs = save->cs; - svm->vmcb01.ptr->save.ss = save->ss; - svm->vmcb01.ptr->save.ds = save->ds; - svm->vmcb01.ptr->save.gdtr = save->gdtr; - svm->vmcb01.ptr->save.idtr = save->idtr; - svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED; - svm->vmcb01.ptr->save.efer = save->efer; - svm->vmcb01.ptr->save.cr0 = save->cr0; - svm->vmcb01.ptr->save.cr3 = save->cr3; - svm->vmcb01.ptr->save.cr4 = save->cr4; - svm->vmcb01.ptr->save.rax = save->rax; - svm->vmcb01.ptr->save.rsp = save->rsp; - svm->vmcb01.ptr->save.rip = save->rip; - svm->vmcb01.ptr->save.cpl = 0; - + svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save); nested_load_control_from_vmcb12(svm, ctl); svm_switch_vmcb(svm, &svm->nested.vmcb02); - nested_vmcb02_prepare_control(svm); - kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); ret = 0; out_free: diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 62926f1a5f..6710d9ee2e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1272,8 +1272,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Pin guest memory */ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) - return -EFAULT; + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); /* allocate memory for header and transport buffer */ ret = -ENOMEM; @@ -1310,8 
+1310,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) } /* Copy packet header to userspace. */ - ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, - params.hdr_len); + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + ret = -EFAULT; e_free_trans_data: kfree(trans_data); @@ -1463,11 +1464,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) data.trans_len = params.trans_len; /* Pin guest memory */ - ret = -EFAULT; guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); goto e_free_trans; + } /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8834822c00..664d20f068 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -198,6 +198,11 @@ module_param(avic, bool, 0444); bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); + +bool intercept_smi = true; +module_param(intercept_smi, bool, 0444); + + static bool svm_gp_erratum_intercept = true; static u8 rsm_ins_bytes[] = "\x0f\xaa"; @@ -1185,7 +1190,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu) svm_set_intercept(svm, INTERCEPT_INTR); svm_set_intercept(svm, INTERCEPT_NMI); - svm_set_intercept(svm, INTERCEPT_SMI); + + if (intercept_smi) + svm_set_intercept(svm, INTERCEPT_SMI); + svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); svm_set_intercept(svm, INTERCEPT_RDPMC); svm_set_intercept(svm, INTERCEPT_CPUID); @@ -1923,7 +1931,7 @@ static int npf_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); + u64 fault_address = svm->vmcb->control.exit_info_2; u64 error_code = svm->vmcb->control.exit_info_1; trace_kvm_page_fault(fault_address, error_code); @@ -2106,6 +2114,11 @@ static int nmi_interception(struct kvm_vcpu *vcpu) return 1; } +static int smi_interception(struct kvm_vcpu *vcpu) +{ + return 1; +} + static int intr_interception(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; @@ -2941,7 +2954,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm_disable_lbrv(vcpu); break; case MSR_VM_HSAVE_PA: - svm->nested.hsave_msr = data; + /* + * Old kernels did not validate the value written to + * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid + * value to allow live migrating buggy or malicious guests + * originating from those kernels. 
+ */ + if (!msr->host_initiated && !page_address_valid(vcpu, data)) + return 1; + + svm->nested.hsave_msr = data & PAGE_MASK; break; case MSR_VM_CR: return svm_set_vm_cr(vcpu, data); @@ -3080,8 +3102,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, [SVM_EXIT_INTR] = intr_interception, [SVM_EXIT_NMI] = nmi_interception, - [SVM_EXIT_SMI] = kvm_emulate_as_nop, - [SVM_EXIT_INIT] = kvm_emulate_as_nop, + [SVM_EXIT_SMI] = smi_interception, [SVM_EXIT_VINTR] = interrupt_window_interception, [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, [SVM_EXIT_CPUID] = kvm_emulate_cpuid, @@ -4288,6 +4309,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) { struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_host_map map_save; int ret; if (is_guest_mode(vcpu)) { @@ -4303,6 +4325,29 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) ret = nested_svm_vmexit(svm); if (ret) return ret; + + /* + * KVM uses VMCB01 to store L1 host state while L2 runs but + * VMCB01 is going to be used during SMM and thus the state will + * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save + * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the + * format of the area is identical to the guest save area offset + * by 0x400 (matches the offset of 'struct vmcb_save_area' + * within 'struct vmcb'). Note: HSAVE area may also be used by + * the L1 hypervisor to save additional host context (e.g. KVM does + * that, see svm_prepare_guest_switch()) which must be + * preserved. + */ + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), + &map_save) == -EINVAL) + return 1; + + BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); + + svm_copy_vmrun_state(&svm->vmcb01.ptr->save, + map_save.hva + 0x400); + + kvm_vcpu_unmap(vcpu, &map_save, true); } return 0; } @@ -4310,13 +4355,14 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) { struct vcpu_svm *svm = to_svm(vcpu); - struct kvm_host_map map; + struct kvm_host_map map, map_save; int ret = 0; if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8); u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); + struct vmcb *vmcb12; if (guest) { if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) @@ -4332,8 +4378,25 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) if (svm_allocate_nested(svm)) return 1; - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva); + vmcb12 = map.hva; + + nested_load_control_from_vmcb12(svm, &vmcb12->control); + + ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12); kvm_vcpu_unmap(vcpu, &map, true); + + /* + * Restore L1 host state from the L1 HSAVE area as VMCB01 was + * used during SMM (see svm_enter_smm()). + */ + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), + &map_save) == -EINVAL) + return 1; + + svm_copy_vmrun_state(map_save.hva + 0x400, + &svm->vmcb01.ptr->save); + + kvm_vcpu_unmap(vcpu, &map_save, true); } } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index f89b623bb5..7e2090752d 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -31,6 +31,7 @@ #define MSRPM_OFFSETS 16 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; +extern bool intercept_smi; /* * Clean bits in VMCB.
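Since intercept_smi is registered with module_param(..., 0444), it can only be chosen at load time. A hypothetical invocation, for illustration only:

	# let SMIs run on bare metal instead of exiting to KVM
	modprobe kvm-amd intercept_smi=0

With interception disabled this way, recalc_intercepts() above also strips INTERCEPT_SMI from the merged L2 intercepts; with it left enabled (the default), the new smi_interception() handler simply returns 1.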
@@ -463,6 +464,8 @@ void svm_leave_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm); int nested_svm_vmrun(struct kvm_vcpu *vcpu); +void svm_copy_vmrun_state(struct vmcb_save_area *from_save, + struct vmcb_save_area *to_save); void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb); int nested_svm_vmexit(struct vcpu_svm *svm); @@ -479,6 +482,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu); int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); +void nested_load_control_from_vmcb12(struct vcpu_svm *svm, + struct vmcb_control_area *control); void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm); void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 3979a94793..db88ed4f21 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -14,8 +14,6 @@ #include "vmx_ops.h" #include "cpuid.h" -extern const u32 vmx_msr_index[]; - #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 #define MSR_TYPE_RW 3 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c6dc1b4452..a4fd10604f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9601,6 +9601,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.dr6, 6); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; + } else if (unlikely(hw_breakpoint_active())) { + set_debugreg(0, 7); } for (;;) { @@ -10985,9 +10987,6 @@ int kvm_arch_hardware_setup(void *opaque) int r; rdmsrl_safe(MSR_EFER, &host_efer); - if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) && - !(host_efer & EFER_NX))) - return -EIO; if (boot_cpu_has(X86_FEATURE_XSAVES)) rdmsrl(MSR_IA32_XSS, host_xss); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index e835164189..4b951458c9 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -570,6 +570,9 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) for (i = 0; i < prog->aux->size_poke_tab; i++) { poke = &prog->aux->poke_tab[i]; + if (poke->aux && poke->aux != prog->aux) + continue; + WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); if (poke->reason != BPF_POKE_REASON_TAIL_CALL) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8d49f8fa98..d83fee21f6 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -502,34 +502,21 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) static int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument) { - struct blkfront_info *info = bdev->bd_disk->private_data; int i; - dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", - command, (long)argument); - switch (command) { case CDROMMULTISESSION: - dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; - - case CDROM_GET_CAPABILITY: { - struct gendisk *gd = info->gd; - if (gd->flags & GENHD_FL_CD) + case CDROM_GET_CAPABILITY: + if (bdev->bd_disk->flags & GENHD_FL_CD) return 0; return -EINVAL; - } - default: - /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", - command);*/ - return -EINVAL; /* same return 
as native Linux */ + return -EINVAL; } - - return 0; } static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, @@ -1177,36 +1164,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, return err; } -static void xlvbd_release_gendisk(struct blkfront_info *info) -{ - unsigned int minor, nr_minors, i; - struct blkfront_ring_info *rinfo; - - if (info->rq == NULL) - return; - - /* No more blkif_request(). */ - blk_mq_stop_hw_queues(info->rq); - - for_each_rinfo(info, rinfo, i) { - /* No more gnttab callback work. */ - gnttab_cancel_free_callback(&rinfo->callback); - - /* Flush gnttab callback work. Must be done with no locks held. */ - flush_work(&rinfo->work); - } - - del_gendisk(info->gd); - - minor = info->gd->first_minor; - nr_minors = info->gd->minors; - xlbd_release_minors(minor, nr_minors); - - blk_cleanup_disk(info->gd); - info->gd = NULL; - blk_mq_free_tag_set(&info->tag_set); -} - /* Already hold rinfo->ring_lock. */ static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo) { @@ -1756,12 +1713,6 @@ static int write_per_ring_nodes(struct xenbus_transaction xbt, return err; } -static void free_info(struct blkfront_info *info) -{ - list_del(&info->info_list); - kfree(info); -} - /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1880,13 +1831,6 @@ static int talk_to_blkback(struct xenbus_device *dev, xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); - - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - - dev_set_drvdata(&dev->dev, NULL); - return err; } @@ -2126,38 +2070,26 @@ static int blkfront_resume(struct xenbus_device *dev) static void blkfront_closing(struct blkfront_info *info) { struct xenbus_device *xbdev = info->xbdev; - struct block_device *bdev = NULL; + struct blkfront_ring_info *rinfo; + unsigned int i; - mutex_lock(&info->mutex); - - if (xbdev->state == XenbusStateClosing) { - mutex_unlock(&info->mutex); + if (xbdev->state == XenbusStateClosing) return; + + /* No more blkif_request(). */ + blk_mq_stop_hw_queues(info->rq); + blk_set_queue_dying(info->rq); + set_capacity(info->gd, 0); + + for_each_rinfo(info, rinfo, i) { + /* No more gnttab callback work. */ + gnttab_cancel_free_callback(&rinfo->callback); + + /* Flush gnttab callback work. Must be done with no locks held. 
*/ + flush_work(&rinfo->work); } - if (info->gd) - bdev = bdgrab(info->gd->part0); - - mutex_unlock(&info->mutex); - - if (!bdev) { - xenbus_frontend_closed(xbdev); - return; - } - - mutex_lock(&bdev->bd_disk->open_mutex); - - if (bdev->bd_openers) { - xenbus_dev_error(xbdev, -EBUSY, - "Device in use; refusing to close"); - xenbus_switch_state(xbdev, XenbusStateClosing); - } else { - xlvbd_release_gendisk(info); - xenbus_frontend_closed(xbdev); - } - - mutex_unlock(&bdev->bd_disk->open_mutex); - bdput(bdev); + xenbus_frontend_closed(xbdev); } static void blkfront_setup_discard(struct blkfront_info *info) @@ -2472,8 +2404,7 @@ static void blkback_changed(struct xenbus_device *dev, break; fallthrough; case XenbusStateClosing: - if (info) - blkfront_closing(info); + blkfront_closing(info); break; } } @@ -2481,56 +2412,21 @@ static void blkback_changed(struct xenbus_device *dev, static int blkfront_remove(struct xenbus_device *xbdev) { struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); - struct block_device *bdev = NULL; - struct gendisk *disk; dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); - if (!info) - return 0; + del_gendisk(info->gd); + + mutex_lock(&blkfront_mutex); + list_del(&info->info_list); + mutex_unlock(&blkfront_mutex); blkif_free(info, 0); + xlbd_release_minors(info->gd->first_minor, info->gd->minors); + blk_cleanup_disk(info->gd); + blk_mq_free_tag_set(&info->tag_set); - mutex_lock(&info->mutex); - - disk = info->gd; - if (disk) - bdev = bdgrab(disk->part0); - - info->xbdev = NULL; - mutex_unlock(&info->mutex); - - if (!bdev) { - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - return 0; - } - - /* - * The xbdev was removed before we reached the Closed - * state. See if it's safe to remove the disk. If the bdev - * isn't closed yet, we let release take care of it. - */ - - mutex_lock(&disk->open_mutex); - info = disk->private_data; - - dev_warn(disk_to_dev(disk), - "%s was hot-unplugged, %d stale handles\n", - xbdev->nodename, bdev->bd_openers); - - if (info && !bdev->bd_openers) { - xlvbd_release_gendisk(info); - disk->private_data = NULL; - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - } - - mutex_unlock(&disk->open_mutex); - bdput(bdev); - + kfree(info); return 0; } @@ -2541,77 +2437,9 @@ static int blkfront_is_ready(struct xenbus_device *dev) return info->is_ready && info->xbdev; } -static int blkif_open(struct block_device *bdev, fmode_t mode) -{ - struct gendisk *disk = bdev->bd_disk; - struct blkfront_info *info; - int err = 0; - - mutex_lock(&blkfront_mutex); - - info = disk->private_data; - if (!info) { - /* xbdev gone */ - err = -ERESTARTSYS; - goto out; - } - - mutex_lock(&info->mutex); - - if (!info->gd) - /* xbdev is closed */ - err = -ERESTARTSYS; - - mutex_unlock(&info->mutex); - -out: - mutex_unlock(&blkfront_mutex); - return err; -} - -static void blkif_release(struct gendisk *disk, fmode_t mode) -{ - struct blkfront_info *info = disk->private_data; - struct xenbus_device *xbdev; - - mutex_lock(&blkfront_mutex); - if (disk->part0->bd_openers) - goto out_mutex; - - /* - * Check if we have been instructed to close. We will have - * deferred this request, because the bdev was still open. 
- */ - - mutex_lock(&info->mutex); - xbdev = info->xbdev; - - if (xbdev && xbdev->state == XenbusStateClosing) { - /* pending switch to state closed */ - dev_info(disk_to_dev(disk), "releasing disk\n"); - xlvbd_release_gendisk(info); - xenbus_frontend_closed(info->xbdev); - } - - mutex_unlock(&info->mutex); - - if (!xbdev) { - /* sudden device removal */ - dev_info(disk_to_dev(disk), "releasing disk\n"); - xlvbd_release_gendisk(info); - disk->private_data = NULL; - free_info(info); - } - -out_mutex: - mutex_unlock(&blkfront_mutex); -} - static const struct block_device_operations xlvbd_block_fops = { .owner = THIS_MODULE, - .open = blkif_open, - .release = blkif_release, .getgeo = blkif_getgeo, .ioctl = blkif_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c index 027484ecfb..3c99696b14 100644 --- a/drivers/char/powernv-op-panel.c +++ b/drivers/char/powernv-op-panel.c @@ -75,6 +75,7 @@ static int __op_panel_update_display(void) rc); break; } + break; case OPAL_SUCCESS: break; default: diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c index 50b5269586..ae24e0397d 100644 --- a/drivers/clk/renesas/r9a07g044-cpg.c +++ b/drivers/clk/renesas/r9a07g044-cpg.c @@ -30,8 +30,9 @@ enum clk_ids { CLK_PLL2_DIV20, CLK_PLL3, CLK_PLL3_DIV2, + CLK_PLL3_DIV2_4, + CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV4, - CLK_PLL3_DIV8, CLK_PLL4, CLK_PLL5, CLK_PLL5_DIV2, @@ -42,12 +43,13 @@ enum clk_ids { }; /* Divider tables */ -static const struct clk_div_table dtable_3b[] = { +static const struct clk_div_table dtable_1_32[] = { {0, 1}, {1, 2}, {2, 4}, {3, 8}, {4, 32}, + {0, 0}, }; static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { @@ -66,47 +68,56 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20), DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2), + DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), + DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2), DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4), - DEF_FIXED(".pll3_div8", CLK_PLL3_DIV8, CLK_PLL3, 1, 8), /* Core output clk */ DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1), DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A, - dtable_3b, CLK_DIVIDER_HIWORD_MASK), + dtable_1_32, CLK_DIVIDER_HIWORD_MASK), DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1), - DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV8, - DIVPL3B, dtable_3b, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4, + DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, + DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), }; static struct rzg2l_mod_clk r9a07g044_mod_clks[] = { - DEF_MOD("gic", R9A07G044_CLK_GIC600, - R9A07G044_CLK_P1, - 0x514, BIT(0), (BIT(0) | BIT(1))), - DEF_MOD("ia55", R9A07G044_CLK_IA55, - R9A07G044_CLK_P1, - 0x518, (BIT(0) | BIT(1)), BIT(0)), - DEF_MOD("scif0", R9A07G044_CLK_SCIF0, - R9A07G044_CLK_P0, - 0x584, BIT(0), BIT(0)), - DEF_MOD("scif1", R9A07G044_CLK_SCIF1, - R9A07G044_CLK_P0, - 0x584, BIT(1), BIT(1)), - DEF_MOD("scif2", R9A07G044_CLK_SCIF2, - R9A07G044_CLK_P0, - 0x584, BIT(2), BIT(2)), - DEF_MOD("scif3", R9A07G044_CLK_SCIF3, - R9A07G044_CLK_P0, - 0x584, BIT(3), BIT(3)), - DEF_MOD("scif4", R9A07G044_CLK_SCIF4, - R9A07G044_CLK_P0, - 0x584, BIT(4), BIT(4)), - DEF_MOD("sci0", R9A07G044_CLK_SCI0, - R9A07G044_CLK_P0, - 0x588, BIT(0), BIT(0)), + 
DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1, + 0x514, 0), + DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2, + 0x518, 0), + DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1, + 0x518, 1), + DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 0), + DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 1), + DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 2), + DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 3), + DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 4), + DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0, + 0x588, 0), +}; + +static struct rzg2l_reset r9a07g044_resets[] = { + DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0), + DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1), + DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0), + DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0), + DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1), + DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2), + DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3), + DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4), + DEF_RST(R9A07G044_SCI0_RST, 0x888, 0), }; static const unsigned int r9a07g044_crit_mod_clks[] __initconst = { - MOD_CLK_BASE + R9A07G044_CLK_GIC600, + MOD_CLK_BASE + R9A07G044_GIC600_GICCLK, }; const struct rzg2l_cpg_info r9a07g044_cpg_info = { @@ -123,5 +134,9 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = { /* Module Clocks */ .mod_clks = r9a07g044_mod_clks, .num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks), - .num_hw_mod_clks = R9A07G044_CLK_MIPI_DSI_PIN + 1, + .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1, + + /* Resets */ + .resets = r9a07g044_resets, + .num_resets = ARRAY_SIZE(r9a07g044_resets), }; diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/renesas-rzg2l-cpg.c index 5009b9e48b..e7c59af2a1 100644 --- a/drivers/clk/renesas/renesas-rzg2l-cpg.c +++ b/drivers/clk/renesas/renesas-rzg2l-cpg.c @@ -47,9 +47,9 @@ #define SDIV(val) DIV_RSMASK(val, 0, 0x7) #define CLK_ON_R(reg) (reg) -#define CLK_MON_R(reg) (0x680 - 0x500 + (reg)) -#define CLK_RST_R(reg) (0x800 - 0x500 + (reg)) -#define CLK_MRST_R(reg) (0x980 - 0x500 + (reg)) +#define CLK_MON_R(reg) (0x180 + (reg)) +#define CLK_RST_R(reg) (reg) +#define CLK_MRST_R(reg) (0x180 + (reg)) #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff) #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) @@ -78,6 +78,7 @@ struct rzg2l_cpg_priv { struct clk **clks; unsigned int num_core_clks; unsigned int num_mod_clks; + unsigned int num_resets; unsigned int last_dt_core_clk; struct raw_notifier_head notifiers; @@ -315,15 +316,13 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core, * * @hw: handle between common and hardware-specific interfaces * @off: register offset - * @onoff: ON/MON bits - * @reset: reset bits + * @bit: ON/MON bit * @priv: CPG/MSTP private data */ struct mstp_clock { struct clk_hw hw; u16 off; - u8 onoff; - u8 reset; + u8 bit; struct rzg2l_cpg_priv *priv; }; @@ -337,6 +336,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) struct device *dev = priv->dev; unsigned long flags; unsigned int i; + u32 bitmask = BIT(clock->bit); u32 value; if (!clock->off) { @@ -349,9 +349,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) spin_lock_irqsave(&priv->rmw_lock, flags); if (enable) - value = (clock->onoff << 16) | clock->onoff; + value = (bitmask << 16) | bitmask; else - value = clock->onoff << 16; + value = bitmask << 16; writel(value, priv->base + CLK_ON_R(reg)); 
spin_unlock_irqrestore(&priv->rmw_lock, flags); @@ -360,7 +360,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) return 0; for (i = 1000; i > 0; --i) { - if (((readl(priv->base + CLK_MON_R(reg))) & clock->onoff)) + if (((readl(priv->base + CLK_MON_R(reg))) & bitmask)) break; cpu_relax(); } @@ -388,6 +388,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) { struct mstp_clock *clock = to_mod_clock(hw); struct rzg2l_cpg_priv *priv = clock->priv; + u32 bitmask = BIT(clock->bit); u32 value; if (!clock->off) { @@ -397,7 +398,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) value = readl(priv->base + CLK_MON_R(clock->off)); - return !(value & clock->onoff); + return !(value & bitmask); } static const struct clk_ops rzg2l_mod_clock_ops = { @@ -457,8 +458,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, init.num_parents = 1; clock->off = mod->off; - clock->onoff = mod->onoff; - clock->reset = mod->reset; + clock->bit = mod->bit; clock->priv = priv; clock->hw.init = &init; @@ -483,12 +483,11 @@ static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 dis = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 dis = BIT(info->resets[id].bit); u32 we = dis << 16; - dev_dbg(rcdev->dev, "reset name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); /* Reset module */ writel(we, priv->base + CLK_RST_R(reg)); @@ -507,11 +506,10 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 value = info->mod_clks[id].reset << 16; + unsigned int reg = info->resets[id].off; + u32 value = BIT(info->resets[id].bit) << 16; - dev_dbg(rcdev->dev, "assert name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); return 0; @@ -522,12 +520,12 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 dis = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 dis = BIT(info->resets[id].bit); u32 value = (dis << 16) | dis; - dev_dbg(rcdev->dev, "deassert name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, + CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); return 0; @@ -538,8 +536,8 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 bitmask = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 bitmask = BIT(info->resets[id].bit); return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask); } @@ -554,9 +552,11 @@ static const struct reset_control_ops rzg2l_cpg_reset_ops = { static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev, const struct of_phandle_args *reset_spec) { + struct rzg2l_cpg_priv *priv = 
rcdev_to_priv(rcdev); + const struct rzg2l_cpg_info *info = priv->info; unsigned int id = reset_spec->args[0]; - if (id >= rcdev->nr_resets) { + if (id >= rcdev->nr_resets || !info->resets[id].off) { dev_err(rcdev->dev, "Invalid reset index %u\n", id); return -EINVAL; } @@ -571,7 +571,7 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv) priv->rcdev.dev = priv->dev; priv->rcdev.of_reset_n_cells = 1; priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate; - priv->rcdev.nr_resets = priv->num_mod_clks; + priv->rcdev.nr_resets = priv->num_resets; return devm_reset_controller_register(priv->dev, &priv->rcdev); } @@ -594,42 +594,49 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device { struct device_node *np = dev->of_node; struct of_phandle_args clkspec; + bool once = true; struct clk *clk; int error; int i = 0; while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec)) { - if (rzg2l_cpg_is_pm_clk(&clkspec)) - goto found; + if (rzg2l_cpg_is_pm_clk(&clkspec)) { + if (once) { + once = false; + error = pm_clk_create(dev); + if (error) { + of_node_put(clkspec.np); + goto err; + } + } + clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); + if (IS_ERR(clk)) { + error = PTR_ERR(clk); + goto fail_destroy; + } - of_node_put(clkspec.np); + error = pm_clk_add_clk(dev, clk); + if (error) { + dev_err(dev, "pm_clk_add_clk failed %d\n", + error); + goto fail_put; + } + } else { + of_node_put(clkspec.np); + } i++; } return 0; -found: - clk = of_clk_get_from_provider(&clkspec); - of_node_put(clkspec.np); - - if (IS_ERR(clk)) - return PTR_ERR(clk); - - error = pm_clk_create(dev); - if (error) - goto fail_put; - - error = pm_clk_add_clk(dev, clk); - if (error) - goto fail_destroy; - - return 0; +fail_put: + clk_put(clk); fail_destroy: pm_clk_destroy(dev); -fail_put: - clk_put(clk); +err: return error; } @@ -692,6 +699,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev) priv->clks = clks; priv->num_core_clks = info->num_total_core_clks; priv->num_mod_clks = info->num_hw_mod_clks; + priv->num_resets = info->num_resets; priv->last_dt_core_clk = info->last_dt_core_clk; for (i = 0; i < nclks; i++) diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.h b/drivers/clk/renesas/renesas-rzg2l-cpg.h index 3948bdd8af..63695280ce 100644 --- a/drivers/clk/renesas/renesas-rzg2l-cpg.h +++ b/drivers/clk/renesas/renesas-rzg2l-cpg.h @@ -21,6 +21,7 @@ #define DDIV_PACK(offset, bitpos, size) \ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8)) #define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3) +#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3) #define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3) /** @@ -76,26 +77,40 @@ enum clk_types { * @id: clock index in array containing all Core and Module Clocks * @parent: id of parent clock * @off: register offset - * @onoff: ON/MON bits - * @reset: reset bits + * @bit: ON/MON bit */ struct rzg2l_mod_clk { const char *name; unsigned int id; unsigned int parent; u16 off; - u8 onoff; - u8 reset; + u8 bit; }; -#define DEF_MOD(_name, _id, _parent, _off, _onoff, _reset) \ - [_id] = { \ +#define DEF_MOD(_name, _id, _parent, _off, _bit) \ + { \ .name = _name, \ - .id = MOD_CLK_BASE + _id, \ + .id = MOD_CLK_BASE + (_id), \ .parent = (_parent), \ .off = (_off), \ - .onoff = (_onoff), \ - .reset = (_reset) \ + .bit = (_bit), \ + } + +/** + * struct rzg2l_reset - Reset definitions + * + * @off: register offset + * @bit: reset bit + */ +struct rzg2l_reset { + u16 off; + u8 bit; +}; + +#define DEF_RST(_id, _off, _bit) \ + 
[_id] = { \ + .off = (_off), \ + .bit = (_bit) \ } /** @@ -126,6 +141,10 @@ struct rzg2l_cpg_info { unsigned int num_mod_clks; unsigned int num_hw_mod_clks; + /* Resets */ + const struct rzg2l_reset *resets; + unsigned int num_resets; + /* Critical Module Clocks that should not be disabled */ const unsigned int *crit_mod_clks; unsigned int num_crit_mod_clks; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 182a4dbca0..c538a153ee 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -942,8 +942,6 @@ static int __init longhaul_init(void) return cpufreq_register_driver(&longhaul_driver); case 10: pr_err("Use acpi-cpufreq driver for VIA C7\n"); - default: - ; } return -ENODEV; diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 20d9bddbb9..394e6e1e96 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct dma_fence **fences, **nfences, **a_fences, **b_fences; - int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; + struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; + int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); if (!sync_file) @@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, * If a sync_file can only be created with sync_file_merge * and sync_file_create, this is a reasonable assumption. */ - for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { + for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { struct dma_fence *pt_a = a_fences[i_a]; struct dma_fence *pt_b = b_fences[i_b]; @@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, fences = nfences; } - if (sync_file_set_fence(sync_file, fences, i) < 0) { - kfree(fences); + if (sync_file_set_fence(sync_file, fences, i) < 0) goto err; - } strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; err: + while (i) + dma_fence_put(fences[--i]); + kfree(fences); fput(sync_file->file); return NULL; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 104ad420ab..baab1ca9f6 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) case IDMAC_SDC_1: case IDMAC_IC_7: ipu_channel_set_priority(ipu, channel, true); + break; default: break; } @@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) case IDMAC_SDC_0: case IDMAC_SDC_1: n_desc = 4; + break; default: break; } diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index c1a69149c8..4a51fdbf5a 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308) case 16: if (is_mpc8308) return false; + break; case 1: case 2: case 4: diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 96ad21869b..a358586107 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud) ud->tchan_cnt), ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt)); + break; default: break; } diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 
83166e02b1..00fe595a5b 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -46,9 +46,6 @@ static int ffa_device_probe(struct device *dev) struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); struct ffa_device *ffa_dev = to_ffa_dev(dev); - if (!ffa_device_match(dev, dev->driver)) - return -ENODEV; - return ffa_drv->probe(ffa_dev); } @@ -99,6 +96,9 @@ int ffa_driver_register(struct ffa_driver *driver, struct module *owner, { int ret; + if (!driver->probe) + return -EINVAL; + driver->driver.bus = &ffa_bus_type; driver->driver.name = driver->name; driver->driver.owner = owner; diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index b1edb4b2e9..c9fb56afbc 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -120,7 +120,7 @@ #define PACK_TARGET_INFO(s, r) \ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) -/** +/* * FF-A specification mentions explicitly about '4K pages'. This should * not be confused with the kernel PAGE_SIZE, which is the translation * granule kernel is configured and may be one among 4K, 16K and 64K. @@ -149,8 +149,10 @@ static const int ffa_linux_errmap[] = { static inline int ffa_to_linux_errno(int errno) { - if (errno < FFA_RET_SUCCESS && errno >= -ARRAY_SIZE(ffa_linux_errmap)) - return ffa_linux_errmap[-errno]; + int err_idx = -errno; + + if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap)) + return ffa_linux_errmap[err_idx]; return -EINVAL; } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 784cf0027d..6c7e24935e 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -104,11 +104,6 @@ static int scmi_dev_probe(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); struct scmi_device *scmi_dev = to_scmi_dev(dev); - const struct scmi_device_id *id; - - id = scmi_dev_match_id(scmi_dev, scmi_drv); - if (!id) - return -ENODEV; if (!scmi_dev->handle) return -EPROBE_DEFER; @@ -139,6 +134,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner, { int retval; + if (!driver->probe) + return -EINVAL; + retval = scmi_protocol_device_request(driver->id_table); if (retval) return retval; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 66e5e694be..9b2e8d42a9 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -47,7 +47,6 @@ enum scmi_error_codes { SCMI_ERR_GENERIC = -8, /* Generic Error */ SCMI_ERR_HARDWARE = -9, /* Hardware Error */ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ - SCMI_ERR_MAX }; /* List of all SCMI devices active in system */ @@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = { static inline int scmi_to_linux_errno(int errno) { - if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX) - return scmi_linux_errmap[-errno]; + int err_idx = -errno; + + if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap)) + return scmi_linux_errmap[err_idx]; return -EIO; } @@ -1025,8 +1026,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, const struct scmi_desc *desc = sinfo->desc; /* Pre-allocated messages, no more than what hdr.seq can support */ - if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { - dev_err(dev, "Maximum message of %d exceeds supported %ld\n", + if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) { + dev_err(dev, + "Invalid maximum messages %d, not in range [1 - %lu]\n", desc->max_msg, MSG_TOKEN_MAX); 
return -EINVAL; } @@ -1137,6 +1139,8 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) * @proto_id and @name: if device was still not existent it is created as a * child of the specified SCMI instance @info and its transport properly * initialized as usual. + * + * Return: A properly initialized scmi device, NULL otherwise. */ static inline struct scmi_device * scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index d860bebd98..0efd20cd9d 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -1457,6 +1457,8 @@ static void scmi_devm_release_notifier(struct device *dev, void *res) * * Generic devres managed helper to register a notifier_block against a * protocol event. + * + * Return: 0 on Success */ static int scmi_devm_notifier_register(struct scmi_device *sdev, u8 proto_id, u8 evt_id, @@ -1523,6 +1525,8 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * Generic devres managed helper to explicitly un-register a notifier_block * against a protocol event, which was previously registered using the above * @scmi_devm_notifier_register. + * + * Return: 0 on Success */ static int scmi_devm_notifier_unregister(struct scmi_device *sdev, u8 proto_id, u8 evt_id, diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 2c88aa2215..3084715863 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get { struct scmi_resp_sensor_reading_complete { __le32 id; - __le64 readings; + __le32 readings_low; + __le32 readings_high; }; struct scmi_sensor_reading_resp { @@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, resp = t->rx.buf; if (le32_to_cpu(resp->id) == sensor_id) - *value = get_unaligned_le64(&resp->readings); + *value = + get_unaligned_le64(&resp->readings_low); else ret = -EPROTO; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index db16b3e836..cf62f43a03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 3b8e1ee8c4..4fb15750b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem, static int update_gpuvm_pte(struct kgd_mem *mem, struct kfd_mem_attachment *entry, - struct amdgpu_sync *sync, - bool *table_freed) + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; struct amdgpu_device *adev = entry->adev; @@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem, return ret; /* Update the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed); + ret = 
amdgpu_vm_bo_update(adev, bo_va, false); if (ret) { pr_err("amdgpu_vm_bo_update failed\n"); return ret; @@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem, static int map_bo_to_gpuvm(struct kgd_mem *mem, struct kfd_mem_attachment *entry, struct amdgpu_sync *sync, - bool no_update_pte, - bool *table_freed) + bool no_update_pte) { int ret; @@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem, if (no_update_pte) return 0; - ret = update_gpuvm_pte(mem, entry, sync, table_freed); + ret = update_gpuvm_pte(mem, entry, sync); if (ret) { pr_err("update_gpuvm_pte() failed\n"); goto update_gpuvm_pte_failed; @@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, - void *drm_priv, bool *table_freed) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); @@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( entry->va, entry->va + bo_size, entry); ret = map_bo_to_gpuvm(mem, entry, ctx.sync, - is_invalid_userptr, table_freed); + is_invalid_userptr); if (ret) { pr_err("Failed to map bo to gpuvm\n"); goto out_unreserve; @@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) continue; kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync, NULL); + ret = update_gpuvm_pte(mem, attachment, &sync); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) continue; kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); + ret = update_gpuvm_pte(mem, attachment, &sync_obj); if (ret) { pr_debug("Memory eviction: update PTE failed. 
Try again\n"); goto validate_map_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 76fe5b71e3..30fa1f61e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); if (r) return r; @@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { bo_va = fpriv->csa_va; BUG_ON(!bo_va); - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; @@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71beb0db01..abb928894e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ + {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b3404c43a9..d0d9bc445d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (operation == AMDGPU_VA_OP_MAP || operation == AMDGPU_VA_OP_REPLACE) { - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) goto error; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 32ce0e679d..83af307e97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } +static void amdgpu_restore_msix(struct amdgpu_device *adev) +{ + u16 ctrl; + + pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + if (!(ctrl & PCI_MSIX_FLAGS_ENABLE)) + return; + + /* VF FLR */ + ctrl &= ~PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); + ctrl |= PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); +} + /** * amdgpu_irq_init - initialize interrupt handling * @@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) { int i, j, k; + if (amdgpu_sriov_vf(adev)) + amdgpu_restore_msix(adev); + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index c13b02caf8..fc66aca285 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device 
*adev, /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, - struct ras_query_if *info) + struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; @@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, return ret; } -/* get the total error counts on all IPs */ -void amdgpu_ras_query_error_count(struct amdgpu_device *adev, - unsigned long *ce_count, - unsigned long *ue_count) +/** + * amdgpu_ras_query_error_count -- Get error counts of all IPs + * @adev: pointer to AMD GPU device + * @ce_count: pointer to an integer to be set to the count of correctable errors. + * @ue_count: pointer to an integer to be set to the count of uncorrectable + * errors. + * + * If non-NULL, @ce_count and/or @ue_count are set to the corresponding + * error counts. Return 0 if the device + * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. + */ +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; unsigned long ce, ue; if (!adev->ras_enabled || !con) - return; + return -EOPNOTSUPP; + + /* Don't count if the caller requested neither value. + */ + if (!ce_count && !ue_count) + return 0; ce = 0; ue = 0; @@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev, struct ras_query_if info = { .head = obj->head, }; + int res; - if (amdgpu_ras_query_error_status(adev, &info)) - return; + res = amdgpu_ras_query_error_status(adev, &info); + if (res) + return res; ce += info.ce_count; ue += info.ue_count; @@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev, if (ue_count) *ue_count = ue; + + return 0; } /* query/inject/cure end */ @@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work) /* Cache new values. */ - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } pm_runtime_mark_last_busy(dev->dev); Out: @@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, /* Those are the cached values at init.
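The reason the callers now test the return value is worth spelling out: the old void amdgpu_ras_query_error_count() could return early without writing to ce_count/ue_count, after which amdgpu_ras_counte_dw() unconditionally cached whatever those local variables happened to contain; gating the atomic_set() calls on a zero return avoids caching indeterminate values.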
*/ - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } return 0; cleanup: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 256cea5d34..b504ed8c9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); -void amdgpu_ras_query_error_count(struct amdgpu_device *adev, - unsigned long *ce_count, - unsigned long *ue_count); +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count); /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 79cfa2d684..078c068937 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, r = vm->update_funcs->commit(¶ms, fence); if (table_freed) - *table_freed = *table_freed || params.table_freed; + *table_freed = params.table_freed; error_unlock: amdgpu_vm_eviction_unlock(vm); @@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object * @clear: if true clear the entries - * @table_freed: return true if page table is freed * * Fill in the page table entries for @bo_va. * @@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * 0 for success, -EINVAL for failure. 
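A cross-reference, since the related hunks are far apart: with @table_freed dropped from amdgpu_vm_bo_update(), callers no longer learn whether page tables were freed, and kfd_ioctl_map_memory_to_gpu() in kfd_chardev.c further below correspondingly switches to flushing TLBs unconditionally once the page-table updates complete.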
*/ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear, bool *table_freed) + bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; @@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, resv, mapping->start, mapping->last, update_flags, mapping->offset, mem, - pages_addr, last_update, table_freed); + pages_addr, last_update, NULL); if (r) return r; } @@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to bo cleared in the page tables */ - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; } @@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, else clear = true; - r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ddb85a85cb..f8fa653d4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct dma_fence **fence, bool *free_table); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear, bool *table_freed); + bool clear); bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 33324427b5..7e0d8c092c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) { - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; + adev->crtc_irq.num_types = adev->mode_info.num_crtc; adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 3ee481557f..ff2307d7ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (!down_read_trylock(&adev->reset_sem)) + if (!down_write_trylock(&adev->reset_sem)) return; amdgpu_virt_fini_data_exchange(adev); @@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) flr_done: atomic_set(&adev->in_gpu_reset, 0); - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 48e588d3c4..9f7aac435d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. 
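The mxgpu_ai change here (and the matching mxgpu_nv one just below) takes reset_sem for writing instead of reading, so the FLR handler excludes concurrent readers rather than running alongside them. A sketch of the trylock pattern, using a POSIX rwlock as a stand-in for the kernel's rw_semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;

static void flr_work(void)
{
	/* Writer trylock: bail out if anyone holds the lock at all. */
	if (pthread_rwlock_trywrlock(&reset_lock) != 0)
		return;

	printf("performing FLR under exclusive lock\n");

	pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
	flr_work();
	return 0;
}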
*/ - if (!down_read_trylock(&adev->reset_sem)) + if (!down_write_trylock(&adev->reset_sem)) return; amdgpu_virt_fini_data_exchange(adev); @@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) flr_done: atomic_set(&adev->in_gpu_reset, 0); - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 67541c3032..e48acdd03c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, long err = 0; int i; uint32_t *devices_arr = NULL; - bool table_freed = false; dev = kfd_device_by_id(GET_GPU_ID(args->handle)); if (!dev) @@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - peer->kgd, (struct kgd_mem *)mem, - peer_pdd->drm_priv, &table_freed); + peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { pr_err("Failed to map to gpu %d/%d\n", i, args->n_devices); @@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, } /* Flush TLBs after waiting for the page table updates to complete */ - if (table_freed) { - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); - } + for (i = 0; i < args->n_devices; i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (WARN_ON_ONCE(!peer)) + continue; + peer_pdd = kfd_get_process_device_data(peer, p); + if (WARN_ON_ONCE(!peer_pdd)) + continue; + kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); } + kfree(devices_arr); return err; @@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } args->n_success = i+1; } - mutex_unlock(&p->mutex); - - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); - if (err) { - pr_debug("Sync memory failed, wait interrupted by user signal\n"); - goto sync_memory_failed; - } - - /* Flush TLBs after waiting for the page table updates to complete */ - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); - } - kfree(devices_arr); + mutex_unlock(&p->mutex); + return 0; bind_process_to_device_failed: @@ -1596,7 +1576,6 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, unmap_memory_from_gpu_failed: mutex_unlock(&p->mutex); copy_from_user_failed: -sync_memory_failed: kfree(devices_arr); return err; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 21ec8a18ca..8a2c6fc438 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, - pdd->drm_priv, NULL); + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv); if (err) goto err_map_mem; diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9a71d8919b..c7b364e4a2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange) static void svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, - struct svm_range *prange, int32_t gpuidx) + int32_t gpuidx) { struct kfd_process_device *pdd; - if (gpuidx == MAX_GPU_INSTANCE) - /* fault is on different page of same range - * or fault is skipped to recover later - */ - pdd = svm_range_get_pdd_by_adev(prange, adev); - else - /* fault recovered - * or fault cannot recover because GPU no access on the range - */ - pdd = kfd_process_device_from_gpuidx(p, gpuidx); + /* fault is on different page of same range + * or fault is skipped to recover later + * or fault is on invalid virtual address + */ + if (gpuidx == MAX_GPU_INSTANCE) { + uint32_t gpuid; + int r; + r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); + if (r < 0) + return; + } + + /* fault is recovered + * or fault cannot recover because GPU no access on the range + */ + pdd = kfd_process_device_from_gpuidx(p, gpuidx); if (pdd) WRITE_ONCE(pdd->faults, pdd->faults + 1); } @@ -2525,7 +2531,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(adev, p, prange, gpuidx); + svm_range_count_fault(adev, p, gpuidx); mmput(mm); out: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 01e1062dc2..d3a2a5ff57 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9191,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) /* restore the backlight level */ - if (dm->backlight_dev) + if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0])) amdgpu_dm_backlight_set_level(dm, dm->brightness[0]); #endif /* diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b8832bdde2..6da226bf11 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train { enum dc_status status = DC_OK; - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - status = configure_lttpr_mode_non_transparent(link, lt_settings); - else + if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) status = configure_lttpr_mode_transparent(link); + else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + status = configure_lttpr_mode_non_transparent(link, lt_settings); + return status; } @@ -1784,7 +1785,6 @@ bool perform_link_training_with_retries( link_enc = stream->link_enc; else link_enc = link->link_enc; - ASSERT(link_enc); /* We need to do this before the link training to ensure the idle pattern in SST * mode will be sent right after the link training diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index fc1fc1a4bf..836864a5a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -390,7 +390,7 @@ void 
dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal); is_dp = dc_is_dp_signal(pipe_ctx->stream->signal); - if (!is_hdmi_tmds) + if (!is_hdmi_tmds && !is_dp) return; if (is_hdmi_tmds) diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index 6119a36b2c..3fea2430de 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -26,6 +26,7 @@ #include "amdgpu_smu.h" #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07 /* MP Apertures */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 388c5cb5c6..0a5d46ac9c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile index 9b3a8503f5..d4c4c49576 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o +SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a3dc7194aa..a421ba85bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) case CHIP_ALDEBARAN: smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; + case CHIP_YELLOW_CARP: + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; + break; default: dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; @@ -694,6 +697,27 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu) return ret; } +int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) +{ + int ret = 0; + struct amdgpu_device *adev = smu->adev; + + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) + return 0; + if (enable) + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); + else + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); + break; + default: + break; + } + + return ret; +} + int smu_v13_0_system_features_control(struct smu_context *smu, bool en) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 18a1ffdca2..0cfeb9fc7c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -25,7 +25,7 @@ #include "amdgpu.h" #include "amdgpu_smu.h" -#include "smu_v13_0_1.h" +#include "smu_v13_0.h" #include "smu13_driver_if_yellow_carp.h" #include "yellow_carp_ppt.h" #include "smu_v13_0_1_ppsmc.h" 
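smu_v13_0_gfx_off_control() above gates GFXOFF on the platform feature mask before choosing between the allow and disallow messages. A compact sketch of that gate-then-dispatch pattern; the message IDs and send helper here are stand-ins, not the SMU interface:

#include <stdio.h>

#define PP_GFXOFF_MASK	(1u << 0)

enum smc_msg { MSG_ALLOW_GFXOFF, MSG_DISALLOW_GFXOFF };

static int send_smc_msg(enum smc_msg msg)
{
	printf("SMC message %d\n", msg);
	return 0;
}

static int gfx_off_control(unsigned int pp_feature, int enable)
{
	if (!(pp_feature & PP_GFXOFF_MASK))
		return 0;	/* feature disabled: silently succeed */
	return send_smc_msg(enable ? MSG_ALLOW_GFXOFF : MSG_DISALLOW_GFXOFF);
}

int main(void)
{
	return gfx_off_control(PP_GFXOFF_MASK, 1);
}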
@@ -186,6 +186,22 @@ static int yellow_carp_init_smc_tables(struct smu_context *smu) return -ENOMEM; } +static int yellow_carp_fini_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + kfree(smu_table->clocks_table); + smu_table->clocks_table = NULL; + + kfree(smu_table->metrics_table); + smu_table->metrics_table = NULL; + + kfree(smu_table->watermarks_table); + smu_table->watermarks_table = NULL; + + return 0; +} + static int yellow_carp_system_features_control(struct smu_context *smu, bool en) { struct smu_feature *feature = &smu->smu_feature; @@ -282,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type) if (index < 0) return index == -EACCES ? 0 : index; - mutex_lock(&smu->message_lock); - - ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); - - mutex_unlock(&smu->message_lock); - - mdelay(10); + ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + if (ret) + dev_err(smu->adev->dev, "Failed to mode reset!\n"); return ret; } @@ -659,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v2_1); } +static int yellow_carp_set_default_dpm_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); +} + static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { @@ -1203,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm } static const struct pptable_funcs yellow_carp_ppt_funcs = { - .check_fw_status = smu_v13_0_1_check_fw_status, - .check_fw_version = smu_v13_0_1_check_fw_version, + .check_fw_status = smu_v13_0_check_fw_status, + .check_fw_version = smu_v13_0_check_fw_version, .init_smc_tables = yellow_carp_init_smc_tables, - .fini_smc_tables = smu_v13_0_1_fini_smc_tables, - .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values, + .fini_smc_tables = yellow_carp_fini_smc_tables, + .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, .system_features_control = yellow_carp_system_features_control, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, .send_smc_msg = smu_cmn_send_smc_msg, .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable, .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable, - .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables, + .set_default_dpm_table = yellow_carp_set_default_dpm_tables, .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, @@ -1222,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, - .set_driver_table_location = smu_v13_0_1_set_driver_table_location, - .gfx_off_control = smu_v13_0_1_gfx_off_control, + .set_driver_table_location = smu_v13_0_set_driver_table_location, + .gfx_off_control = smu_v13_0_gfx_off_control, .post_init = yellow_carp_post_smu_init, .mode2_reset = yellow_carp_mode2_reset, .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index f4fb68e895..e382b7f235 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj, switch (obj->mm.madv) { case I915_MADV_DONTNEED: i915_gem_object_truncate(obj); + return; case __I915_MADV_PURGED: return; } diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 21c8b7350b..da4f5eb43a 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, __i915_gem_object_pin_pages(pt->base); i915_gem_object_make_unshrinkable(pt->base); - if (lvl || - gen8_pt_count(*start, end) < I915_PDES || - intel_vgpu_active(vm->i915)) - fill_px(pt, vm->scratch[lvl]->encode); + fill_px(pt, vm->scratch[lvl]->encode); spin_lock(&pd->lock); if (likely(!pd->entry[idx])) { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 1411787542..1e8a971a86 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1169,7 +1169,7 @@ static int msm_gem_new_impl(struct drm_device *dev, case MSM_BO_CACHED_COHERENT: if (priv->has_cached_coherent) break; - /* fallthrough */ + fallthrough; default: DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index ef70140c5b..873cbd38e6 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt) if (ret) return ret; - ret = nt35510_read_id(nt); - if (ret) - return ret; + nt35510_read_id(nt); /* Set up stuff in manufacturer control, page 1 */ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 19fd39d9a0..37a1b6a6ad 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, struct qxl_bo *qbo; struct qxl_device *qdev; - if (!qxl_ttm_bo_is_qxl_bo(bo)) + if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource) return; qbo = to_qxl_bo(bo); qdev = to_qxl(qbo->tbo.base.dev); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 03395386e8..f4b08a8705 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev, struct drm_mm *mm = &rman->mm; int ret; + if (!man) + return 0; + ttm_resource_manager_set_used(man, false); ret = ttm_resource_manager_evict_all(bdev, man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6f5ea00973..45aeeca9b8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 5648664f71..f2d6254154 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ttm_bo_unpin(bo); ttm_bo_unreserve(bo); - ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 
dd20b01771..235f9bdaea 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -379,6 +379,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) switch (idx) { case CMDQ_ERR_CERROR_ABT_IDX: dev_err(smmu->dev, "retrying command fetch\n"); + return; case CMDQ_ERR_CERROR_NONE_IDX: return; case CMDQ_ERR_CERROR_ATC_INV_IDX: diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 25ed444ff9..021cf8f65f 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -849,12 +849,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); - goto err_sysfs_remove; + return ret; } - ret = bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); - if (ret) - goto err_unregister_device; + bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); if (qcom_iommu->local_base) { pm_runtime_get_sync(dev); @@ -863,13 +861,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) } return 0; - -err_unregister_device: - iommu_device_unregister(&qcom_iommu->iommu); - -err_sysfs_remove: - iommu_device_sysfs_remove(&qcom_iommu->iommu); - return ret; } static int qcom_iommu_device_remove(struct platform_device *pdev) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a6a07d9857..dd22fc7d51 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2429,10 +2429,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, return 0; } -static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) +static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn) { - unsigned long flags; + struct intel_iommu *iommu = info->iommu; struct context_entry *context; + unsigned long flags; u16 did_old; if (!iommu) @@ -2444,7 +2445,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn spin_unlock_irqrestore(&iommu->lock, flags); return; } - did_old = context_domain_id(context); + + if (sm_supported(iommu)) { + if (hw_pass_through && domain_type_is_si(info->domain)) + did_old = FLPT_DEFAULT_DID; + else + did_old = info->domain->iommu_did[iommu->seq_id]; + } else { + did_old = context_domain_id(context); + } + context_clear_entry(context); __iommu_flush_cache(iommu, context, sizeof(*context)); spin_unlock_irqrestore(&iommu->lock, flags); @@ -2462,6 +2472,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn 0, 0, DMA_TLB_DSI_FLUSH); + + __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); } static inline void unlink_domain_info(struct device_domain_info *info) @@ -4425,9 +4437,9 @@ int __init intel_iommu_init(void) static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) { - struct intel_iommu *iommu = opaque; + struct device_domain_info *info = opaque; - domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); + domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff); return 0; } @@ -4437,12 +4449,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op * devices, unbinding the driver from any one of them will possibly leave * the others unable to operate. 
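Both the i915 try_to_writeback() hunk earlier and the arm-smmu-v3 hunk above plug the same class of bug: a switch case that was meant to end silently fell through into the next label. A sketch of the failure mode and the fix:

#include <stdio.h>

enum err_idx { ERR_ABT, ERR_NONE, ERR_ATC_INV };

static void skip_err(enum err_idx idx)
{
	switch (idx) {
	case ERR_ABT:
		printf("retrying command fetch\n");
		return;	/* without this, control falls into ERR_NONE */
	case ERR_NONE:
		return;
	case ERR_ATC_INV:
		printf("resuming ATC invalidation\n");
		break;
	}
}

int main(void)
{
	skip_err(ERR_ABT);
	return 0;
}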
*/ -static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) +static void domain_context_clear(struct device_domain_info *info) { - if (!iommu || !dev || !dev_is_pci(dev)) + if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) return; - pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); + pci_for_each_dma_alias(to_pci_dev(info->dev), + &domain_context_clear_one_cb, info); } static void __dmar_remove_one_dev_info(struct device_domain_info *info) @@ -4459,14 +4472,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) iommu = info->iommu; domain = info->domain; - if (info->dev) { + if (info->dev && !dev_is_real_dma_subdevice(info->dev)) { if (dev_is_pci(info->dev) && sm_supported(iommu)) intel_pasid_tear_down_entry(iommu, info->dev, PASID_RID2PASID, false); iommu_disable_dev_iotlb(info); - if (!dev_is_real_dma_subdevice(info->dev)) - domain_context_clear(iommu, info->dev); + domain_context_clear(info); intel_pasid_free_table(info->dev); } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 94b9d8e5b9..9febfb7f30 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -544,12 +544,14 @@ static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma) } #define DT_HI_MASK GENMASK_ULL(39, 32) +#define DTE_BASE_HI_MASK GENMASK(11, 4) #define DT_SHIFT 28 static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr) { - return (phys_addr_t)(addr & RK_DTE_PT_ADDRESS_MASK) | - ((addr & DT_HI_MASK) << DT_SHIFT); + u64 addr64 = addr; + return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) | + ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT); } static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma) diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 0db17bcc9c..cb1a64a5c2 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -789,6 +789,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } } + fallthrough; + case JZ4740_MMC_STATE_DONE: break; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0ff7567bd0..d22d783033 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, static int bond_ipsec_add_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; + int err; if (!bond_dev) return -EINVAL; + rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); - xs->xso.real_dev = slave->dev; - bond->xs = xs; + if (!slave) { + rcu_read_unlock(); + return -ENODEV; + } - if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + rcu_read_unlock(); return -EINVAL; } - return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC); + if (!ipsec) { + rcu_read_unlock(); + return -ENOMEM; + } + xs->xso.real_dev = slave->dev; + + err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + if (!err) { + ipsec->xs = xs; + INIT_LIST_HEAD(&ipsec->list); + spin_lock_bh(&bond->ipsec_lock); + list_add(&ipsec->list, &bond->ipsec_list); + spin_unlock_bh(&bond->ipsec_lock); + } else { + kfree(ipsec); + } + rcu_read_unlock(); + 
return err; +} + +static void bond_ipsec_add_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) + goto out; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { + spin_lock_bh(&bond->ipsec_lock); + if (!list_empty(&bond->ipsec_list)) + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_add\n", + __func__); + spin_unlock_bh(&bond->ipsec_lock); + goto out; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + ipsec->xs->xso.real_dev = slave->dev; + if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) { + slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__); + ipsec->xs->xso.real_dev = NULL; + } + } + spin_unlock_bh(&bond->ipsec_lock); +out: + rcu_read_unlock(); } /** @@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) static void bond_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; if (!bond_dev) return; + rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); if (!slave) - return; + goto out; - xs->xso.real_dev = slave->dev; + if (!xs->xso.real_dev) + goto out; - if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + WARN_ON(xs->xso.real_dev != slave->dev); + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); - return; + goto out; } slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +out: + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (ipsec->xs == xs) { + list_del(&ipsec->list); + kfree(ipsec); + break; + } + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); +} + +static void bond_ipsec_del_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (!ipsec->xs->xso.real_dev) + continue; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_delete\n", + __func__); + } else { + slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); + } + ipsec->xs->xso.real_dev = NULL; + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); } /** @@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *curr_active = rcu_dereference(bond->curr_active_slave); - struct net_device *slave_dev = curr_active->dev; + struct net_device *real_dev; + struct slave *curr_active; + struct bonding *bond; + int err; - if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - return true; + bond = netdev_priv(bond_dev); + rcu_read_lock(); + curr_active = 
rcu_dereference(bond->curr_active_slave); + real_dev = curr_active->dev; - if (!(slave_dev->xfrmdev_ops - && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { - slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); - return false; + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { + err = false; + goto out; } - xs->xso.real_dev = slave_dev; - return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); + if (!xs->xso.real_dev) { + err = false; + goto out; + } + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_offload_ok || + netif_is_bond_master(real_dev)) { + err = false; + goto out; + } + + err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +out: + rcu_read_unlock(); + return err; } static const struct xfrmdev_ops bond_xfrmdev_ops = { @@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return; #ifdef CONFIG_XFRM_OFFLOAD - if (old_active && bond->xs) - bond_ipsec_del_sa(bond->xs); + bond_ipsec_del_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */ if (new_active) { @@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } #ifdef CONFIG_XFRM_OFFLOAD - if (new_active && bond->xs) { - xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); - bond_ipsec_add_sa(bond->xs); - } + bond_ipsec_add_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */ /* resend IGMP joins since active slave has changed or @@ -3327,6 +3450,7 @@ static int bond_master_netdev_event(unsigned long event, return bond_event_changename(event_bond); case NETDEV_UNREGISTER: bond_remove_proc_entry(event_bond); + xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); break; case NETDEV_REGISTER: bond_create_proc_entry(event_bond); @@ -4894,7 +5018,8 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; - bond->xs = NULL; + INIT_LIST_HEAD(&bond->ipsec_list); + spin_lock_init(&bond->ipsec_lock); #endif /* CONFIG_XFRM_OFFLOAD */ /* don't acquire bond device's netif_tx_lock when transmitting */ diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index a77124bc1f..709660cb38 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -20,15 +20,6 @@ config CAIF_TTY identified as N_CAIF. When this ldisc is opened from user space it will redirect the TTY's traffic into the CAIF stack. -config CAIF_HSI - tristate "CAIF HSI transport driver" - depends on CAIF - default n - help - The CAIF low level driver for CAIF over HSI. - Be aware that if you enable this then you also need to - enable a low-level HSI driver. 
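The bonding rework above replaces the single cached bond->xs with a list of offloaded states guarded by a spinlock, so a failover can tear all of them down on the old slave and replay them on the new one. A minimal sketch of that bookkeeping, with a malloc'd list and a pthread mutex standing in for list_head and ipsec_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sa_entry {
	int id;			/* stand-in for the xfrm_state pointer */
	struct sa_entry *next;
};

static struct sa_entry *sa_list;
static pthread_mutex_t sa_lock = PTHREAD_MUTEX_INITIALIZER;

static void sa_add(int id)
{
	struct sa_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->id = id;
	pthread_mutex_lock(&sa_lock);
	e->next = sa_list;
	sa_list = e;
	pthread_mutex_unlock(&sa_lock);
}

/* On failover: walk the list and re-add every SA on the new device. */
static void sa_replay_all(void)
{
	pthread_mutex_lock(&sa_lock);
	for (struct sa_entry *e = sa_list; e; e = e->next)
		printf("re-adding SA %d on new active slave\n", e->id);
	pthread_mutex_unlock(&sa_lock);
}

int main(void)
{
	sa_add(1);
	sa_add(2);
	sa_replay_all();
	return 0;
}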
- config CAIF_VIRTIO tristate "CAIF virtio transport driver" depends on CAIF && HAS_DMA diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index b1918c8c12..97f664f801 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG # Serial interface obj-$(CONFIG_CAIF_TTY) += caif_serial.o -# HSI interface -obj-$(CONFIG_CAIF_HSI) += caif_hsi.o - # Virtio interface obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a7e5ac60ba..1542bfb8b5 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev, if (of_property_read_u32(port, "reg", &port_num)) continue; - if (!(dev->port_mask & BIT(port_num))) + if (!(dev->port_mask & BIT(port_num))) { + of_node_put(port); return -EINVAL; + } of_get_phy_mode(port, &dev->ports[port_num].interface); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 961fa6b75c..beb41572d0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .serdes_irq_enable = mv88e6390_serdes_irq_enable, .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; @@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = 
mv88e6352_port_set_mcast_flood, @@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 4f0545605f..ced8c9cb29 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) for (i = 0; i < ds->num_ports; i++) { mac[i] = default_mac; - if (i == dsa_upstream_port(priv->ds, i)) { - /* STP doesn't get called for CPU port, so we need to - * set the I/O parameters statically. - */ - mac[i].dyn_learn = true; - mac[i].ingress = true; - mac[i].egress = true; - } + + /* Let sja1105_bridge_stp_state_set() keep address learning + * enabled for the CPU port. 
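The mv88e6141/6341 blocks above extend per-chip ops tables: each switch family is described by a struct of function pointers, and a missing or wrong entry (here stats_set_histogram and the serdes hooks) simply selects the wrong implementation at runtime. A tiny sketch of the dispatch pattern, with invented names:

#include <stdio.h>

struct chip_ops {
	void (*stats_set_histogram)(void);
	int (*serdes_get_sset_count)(void);
};

static void histogram_6390(void) { printf("6390-style histogram\n"); }
static int serdes_sset_count_6390(void) { return 4; }

static const struct chip_ops mv88e6341_ops = {
	.stats_set_histogram	= histogram_6390,
	.serdes_get_sset_count	= serdes_sset_count_6390,
};

int main(void)
{
	/* Callers only ever go through the table, so fixing the table
	 * fixes every call site at once. */
	mv88e6341_ops.stats_set_histogram();
	printf("serdes stats: %d\n", mv88e6341_ops.serdes_get_sset_count());
	return 0;
}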
+ */ + if (dsa_is_cpu_port(ds, i)) + priv->learn_ena |= BIT(i); } return 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index 7dff203508..f19370c334 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw) int ret_val; u16 mii_bmcr_data = BMCR_RESET; + if (hw->nic_type == athr_mt) { + hw->phy_configured = true; + return 0; + } + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { dev_err(&pdev->dev, "Error get phy ID\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index e01156434a..8da6f867b6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1643,7 +1643,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_PASSIVE: - reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); if (GENET_IS_V5(priv)) { reg &= ~(EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | @@ -3245,15 +3246,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, /* Returns a reusable dma control register value */ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx) { + unsigned int i; u32 reg; u32 dma_ctrl; /* disable DMA */ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_tdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_rdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_rdma_writel(priv, reg, DMA_CTRL); @@ -3308,7 +3315,6 @@ static int bcmgenet_open(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned long dma_ctrl; - u32 reg; int ret; netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); @@ -3334,12 +3340,6 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX and RX queues */ dma_ctrl = bcmgenet_dma_disable(priv, true); @@ -4158,7 +4158,6 @@ static int bcmgenet_resume(struct device *d) struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rxnfc_rule *rule; unsigned long dma_ctrl; - u32 reg; int ret; if (!netif_running(dev)) @@ -4195,12 +4194,6 @@ static int bcmgenet_resume(struct device *d) if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) bcmgenet_hfb_create_rxnfc_filter(priv, rule); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv, false); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index facde824bc..e31a5a397f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ 
b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - if (priv->hw_params->flags & GENET_HAS_EXT) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg &= ~EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - reg = UMAC_IRQ_MPD_R; if (hfb_enable) reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 762113a04d..dbf9a0e660 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap) { unsigned int i; + if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); list_del(&adap->list_node); @@ -3894,7 +3897,6 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = { .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state, }; -#endif static void cxgb4_mgmt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) @@ -3909,6 +3911,7 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev, static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { .get_drvinfo = cxgb4_mgmt_get_drvinfo, }; +#endif static void notify_fatal_err(struct work_struct *work) { @@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq); - if (is_uld(adapter)) { - detach_ulds(adapter); - t4_uld_clean_up(adapter); - } + detach_ulds(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + unregister_netdev(adapter->port[i]); + + t4_uld_clean_up(adapter); adap_free_hma_mem(adapter); @@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev) cxgb4_free_mps_ref_entries(adapter); - for_each_port(adapter, i) - if (adapter->port[i]->reg_state == NETREG_REGISTERED) - unregister_netdev(adapter->port[i]); - debugfs_remove_recursive(adapter->debugfs_root); if (!is_t4(adapter->params.chip)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 743af9e654..17faac7158 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap) { unsigned int i; + if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 867e87af34..099a2bc5ae 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_enable_device(pdev); if (err) - return -ENXIO; + return err; err = pci_request_regions(pdev, "gvnic-cfg"); if (err) @@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err); goto abort_with_pci_region; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent dma mask: err=%d\n", err); - goto 
abort_with_pci_region; - } - reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0); if (!reg_bar) { dev_err(&pdev->dev, "Failed to map pci bar!\n"); @@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); if (!dev) { dev_err(&pdev->dev, "could not allocate netdev\n"); + err = -ENOMEM; goto abort_with_db_bar; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) - goto abort_with_wq; + goto abort_with_gve_init; dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); @@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) queue_work(priv->gve_wq, &priv->service_task); return 0; +abort_with_gve_init: + gve_teardown_priv_resources(priv); + abort_with_wq: destroy_workqueue(priv->gve_wq); @@ -1590,7 +1587,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) abort_with_enabled: pci_disable_device(pdev); - return -ENXIO; + return err; } static void gve_remove(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 77bb8227f8..8500621b2c 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; } - /* Prefetch the payload header. */ - prefetch((char *)buf_state->addr + buf_state->page_info.page_offset); -#if L1_CACHE_BYTES < 128 - prefetch((char *)buf_state->addr + buf_state->page_info.page_offset + - L1_CACHE_BYTES); -#endif - if (eop && buf_len <= priv->rx_copybreak) { rx->skb_head = gve_rx_copy(priv->dev, napi, &buf_state->page_info, buf_len, 0); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 374a75d4fa..ed77191d19 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2420,9 +2420,10 @@ static int do_passive_init(struct ibmvnic_adapter *adapter) static void __ibmvnic_reset(struct work_struct *work) { - struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; bool saved_state = false; + struct ibmvnic_rwi *tmprwi; + struct ibmvnic_rwi *rwi; unsigned long flags; u32 reset_state; int rc = 0; @@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work) } else { rc = do_reset(adapter, rwi, reset_state); } - kfree(rwi); + tmprwi = rwi; adapter->last_reset_time = jiffies; if (rc) @@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); + /* + * If there is another reset queued, free the previous rwi + * and process the new reset even if previous reset failed + * (the previous reset could have failed because of a fail + * over for instance, so process the fail over). + * + * If there are no resets queued and the previous reset failed, + * the adapter would be in an undefined state. So retry the + * previous reset as a hard reset. 
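The logic the ibmvnic comment above describes, as a standalone sketch: the just-processed work item (tmprwi) is freed only when a newer one is queued; if the queue is empty and the attempt failed, the same item is reused for a retry. free() stands in for kfree() and the reset itself is faked:

#include <stdio.h>
#include <stdlib.h>

struct rwi { int reason; };

static struct rwi *get_next_rwi(void)
{
	return NULL;		/* pretend the queue is empty */
}

int main(void)
{
	struct rwi *rwi = malloc(sizeof(*rwi));
	struct rwi *tmprwi;
	int rc = -1;		/* pretend this reset attempt failed */

	if (!rwi)
		return 1;
	rwi->reason = 1;
	/* ... the reset itself would run here ... */
	tmprwi = rwi;

	rwi = get_next_rwi();
	if (rwi)
		free(tmprwi);	/* newer reset queued: drop the old item */
	else if (rc)
		rwi = tmprwi;	/* nothing queued, attempt failed: retry it */

	if (rwi)
		printf("processing reset, reason %d\n", rwi->reason);
	free(rwi);
	return 0;
}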
+ */ + if (rwi) + kfree(tmprwi); + else if (rc) + rwi = tmprwi; + if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || - rwi->reset_reason == VNIC_RESET_MOBILITY)) + rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) adapter->force_reset_recovery = true; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d150dade06..757a54c39e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7664,6 +7664,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index dbcae92bb1..adfa2768f0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2227,6 +2227,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_netdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e612c24fa3..44bafedd09 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7e6435dc7e..171a7a629b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) **/ static int igb_request_msix(struct igb_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; @@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter) **/ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { - struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; /* If any of the Qav features is enabled, configure queues as SR and * with HIGH PRIO. 
If none is, then configure them with LOW PRIO and @@ -3615,6 +3623,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 9e0bbb2e55..5901ed9fb5 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) if (hw->phy.ops.read_reg) return hw->phy.ops.read_reg(hw, offset, data); - return 0; + return -EOPNOTSUPP; } void igc_reinit_locked(struct igc_adapter *); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 9532309509..e29aadbc67 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; @@ -6054,6 +6056,7 @@ static int igc_probe(struct pci_dev *pdev, err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ffff69efd7..913253f8ec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11067,6 +11067,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index caaea2c920..e3e4676af9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; int ret; + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", xs->id.proto); @@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) { - 
struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 361bc4fbe2..76a7777c74 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_size_set(frag, data_len); __skb_frag_set_page(frag, page); - - /* last fragment */ - if (len == *size) { - struct skb_shared_info *sinfo; - - sinfo = xdp_get_shared_info_from_buff(xdp); - sinfo->nr_frags = xdp_sinfo->nr_frags; - memcpy(sinfo->frags, xdp_sinfo->frags, - sinfo->nr_frags * sizeof(skb_frag_t)); - } } else { page_pool_put_full_page(rxq->page_pool, page, true); } + + /* last fragment */ + if (len == *size) { + struct skb_shared_info *sinfo; + + sinfo = xdp_get_shared_info_from_buff(xdp); + sinfo->nr_frags = xdp_sinfo->nr_frags; + memcpy(sinfo->frags, xdp_sinfo->frags, + sinfo->nr_frags * sizeof(skb_frag_t)); + } *size -= len; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index fac6474ad6..9169849881 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id) return test_bit(lmac_id, &cgx->lmac_bmap); } +/* Helper function to get sequential index + * given the enabled LMAC of a CGX + */ +static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) +{ + int tmp, id = 0; + + for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + if (tmp == lmac_id) + break; + id++; + } + + return id; +} + struct mac_ops *get_mac_ops(void *cgxd) { if (!cgxd) @@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr) return mac; } +static void cfg2mac(u64 cfg, u8 *mac_addr) +{ + int i, index = 0; + + for (i = ETH_ALEN - 1; i >= 0; i--, index++) + mac_addr[i] = (cfg >> (8 * index)) & 0xFF; +} + int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index, id; u64 cfg; + /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; + /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ cfg = mac2u64 (mac_addr); - cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | + CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) +{ + struct mac_ops *mac_ops; + struct cgx *cgx = cgxd; + + if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) + return 0; + + cgx = cgxd; + /* Get mac_ops to know csr offset */ + mac_ops = cgx->mac_ops; + + return 
cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); +} + +u64 cgx_read_dmac_entry(void *cgxd, int index) +{ + struct mac_ops *mac_ops; + struct cgx *cgx; + + if (!cgxd) + return 0; + + cgx = cgxd; + mac_ops = cgx->mac_ops; + return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); +} + +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + int index, idx; + u64 cfg = 0; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Get available index where entry is to be installed */ + idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); + if (idx < 0) + return idx; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + idx; + + cfg = mac2u64 (mac_addr); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cfg |= ((u64)lmac_id << 49); + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); + + if (is_multicast_ether_addr(mac_addr)) { + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE_CAM; + lmac->mcast_filters_count++; + } else if (!lmac->mcast_filters_count) { + cfg |= CGX_DMAC_MCAST_MODE; + } + + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return idx; +} + +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 index = 0, id; + u64 cfg; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Restore index 0 to its default init value as done during + * cgx_lmac_init + */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} + +/* Allows caller to change macaddress associated with index + * in dmac filter table including index 0 reserved for + * interface mac address + */ +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; + struct lmac *lmac; + u64 cfg; + int id; + + lmac = lmac_pdata(lmac_id, cgx_dev); + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* ensure index is already set */ + if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) + return -EINVAL; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + cfg &= ~CGX_RX_DMAC_ADR_MASK; + cfg |= mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + return 0; +} + +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 mac[ETH_ALEN]; + u64 cfg; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* 
Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* Skip deletion for reserved index i.e. index 0 */ + if (index == 0) + return 0; + + rvu_free_rsrc(&lmac->mac_to_index_bmap, index); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + /* Read MAC address to check whether it is ucast or mcast */ + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + + cfg2mac(cfg, mac); + if (is_multicast_ether_addr(mac)) + lmac->mcast_filters_count--; + + if (!lmac->mcast_filters_count) { + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + } + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + return 0; +} + +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + + if (lmac) + return lmac->mac_to_index_bmap.max; + + return 0; +} + u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index; u64 cfg; + int id; mac_ops = cgx_dev->mac_ops; - cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } @@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx); + u16 max_dmac = lmac->mac_to_index_bmap.max; struct mac_ops *mac_ops; + int index, i; u64 cfg = 0; + int id; if (!cgx) return; + id = get_sequence_id_of_lmac(cgx, lmac_id); + mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); - cfg |= CGX_DMAC_BCAST_MODE; + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); + } } else { /* Disable promiscuous mode */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg |= CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + + index * 0x8), + cfg); + } + } } } @@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx) } lmac->cgx = cgx; + 
lmac->mac_to_index_bmap.max = + MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); + if (err) + return err; + + /* Reserve first entry for default MAC address */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); @@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx) continue; cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); + kfree(lmac->mac_to_index_bmap.bmap); kfree(lmac->name); kfree(lmac); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 1252126216..237ba2b562 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -23,6 +23,7 @@ #define CGX_ID_MASK 0x7 #define MAX_LMAC_PER_CGX 4 +#define MAX_DMAC_ENTRIES_PER_CGX 32 #define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) @@ -46,10 +47,12 @@ #define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2) #define CGX_DMAC_MCAST_MODE BIT_ULL(1) #define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) #define CGXX_CMRX_TX_STAT0 0x700 @@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id); u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index); +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id); void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); @@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); unsigned long cgx_get_lmac_bmap(void *cgxd); void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index); +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id); +u64 cgx_read_dmac_entry(void *cgxd, int index); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 45706fd871..a8b7b1c7a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -10,17 +10,19 @@ #include "rvu.h" #include "cgx.h" /** - * struct lmac + * struct lmac - per lmac locks and properties * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion * @cmd_lock: Lock to serialize the command interface * @resp: command response * @link_info: link related information + * @mac_to_index_bmap: Mac address to CGX table index mapping * @event_cb: callback for linkchange events * 
@event_cb_lock: lock for serializing callback with unregister + * @cgx: parent cgx port + * @mcast_filters_count: Number of multicast filters installed + * @lmac_id: lmac port id * @cmd_pend: flag set before new command is started * flag cleared after command response is received - * @cgx: parent cgx port - * @lmac_id: lmac port id * @name: lmac port name */ struct lmac { @@ -29,12 +31,14 @@ struct lmac { struct mutex cmd_lock; u64 resp; struct cgx_link_user_info link_info; + struct rsrc_bmap mac_to_index_bmap; struct cgx_event_cb event_cb; /* lock for serializing callback with unregister */ spinlock_t event_cb_lock; - bool cmd_pend; struct cgx *cgx; + u8 mcast_filters_count; u8 lmac_id; + bool cmd_pend; char *name; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 770d862628..f5ec39de02 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ + msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ @@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ - /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ + msg_rsp) \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ + cgx_max_dmac_entries_get_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ + msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ npa_lf_alloc_req, npa_lf_alloc_rsp) \ @@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get { u8 mac_addr[ETH_ALEN]; }; +/* Structure for requesting the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +/* Structure for response against the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_rsp { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for requesting the operation to + * delete DMAC filter entry from CGX interface + */ +struct cgx_mac_addr_del_req { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for response against the operation to + * get maximum supported DMAC filter entries + */ +struct cgx_max_dmac_entries_get_rsp { + struct mbox_msghdr hdr; + u8 max_dmac_filters; +}; + struct cgx_link_user_info { uint64_t link_up:1; uint64_t full_duplex:1; @@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp { int status; }; +struct cgx_mac_addr_update_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; + u8 index; +}; + #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ #define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) @@ -1278,6 +1326,14 @@ struct 
set_vf_perm { u64 flags; }; +struct lmtst_tbl_setup_req { + struct mbox_msghdr hdr; + u16 base_pcifunc; + u8 use_local_lmt_region; + u64 lmt_iova; + u64 rsvd[4]; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 0b092949d7..10cddf1ac7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2333,6 +2333,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_reset_lmt_map_tbl(rvu, pcifunc); rvu_detach_rsrcs(rvu, NULL, pcifunc); mutex_unlock(&rvu->flr_lock); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 9e5d9ba6f0..10e58a5d58 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -243,6 +243,7 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */ unsigned long flags; }; @@ -656,6 +657,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, int rxtxflag, u64 *stat); +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc); + /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); @@ -741,6 +744,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); @@ -754,6 +758,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); int rvu_set_channels_base(struct rvu *rvu); void rvu_program_channels(struct rvu *rvu); +/* CN10K RVU - LMT */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc); + #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); void rvu_dbg_exit(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6e2bf4fcd2..6cc8fbb719 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } -static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) { unsigned long pfmap; @@ -454,6 +454,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + int i = 0, lmac_count = 0; + u8 max_dmac_filters; + u8 cgx_id, lmac_id; + void *cgx_dev; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgx_dev = cgx_get_pdata(cgx_id); + lmac_count
= cgx_get_lmac_cnt(cgx_dev); + max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count; + + for (i = 0; i < max_dmac_filters; i++) + cgx_lmac_addr_del(cgx_id, lmac_id, i); + + /* As cgx_lmac_addr_del does not clear entry for index 0 + * so it needs to be done explicitly + */ + cgx_lmac_addr_reset(cgx_id, lmac_id); +} + int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -557,6 +582,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, return 0; } +int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, + struct cgx_mac_addr_add_req *req, + struct cgx_mac_addr_add_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); + if (rc >= 0) { + rsp->index = rc; + return 0; + } + + return rc; +} + +int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, + struct cgx_mac_addr_del_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); +} + +int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, + struct msg_req *req, + struct cgx_max_dmac_entries_get_rsp + *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + /* If msg is received from PFs(which are not mapped to CGX LMACs) + * or VF then no entries are allocated for DMAC filters at CGX level. + * So returning zero. + */ + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { + rsp->max_dmac_filters = 0; + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id); + return 0; +} + int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) @@ -953,3 +1035,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); return 0; } + +int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_reset(cgx_id, lmac_id); +} + +int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, + struct cgx_mac_addr_update_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7d9e71c696..8d48b64485 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -10,6 +10,206 @@ #include "cgx.h" #include "rvu_reg.h" +/* RVU LMTST */ +#define LMT_TBL_OP_READ 0 +#define LMT_TBL_OP_WRITE 1 +#define LMT_MAP_TABLE_SIZE (128 * 1024) +#define LMT_MAPTBL_ENTRY_SIZE 16 + +/* 
Function to perform operations (read/write) on lmtst map table */ +static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) +{ + void __iomem *lmt_map_base; + u64 tbl_base; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to set up lmt map table mapping!!\n"); + return -ENOMEM; + } + + if (lmt_tbl_op == LMT_TBL_OP_READ) { + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); + /* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Write 1 to flush; the read is used as a + * barrier and sets up a data dependency. Write 0 after the + * write of 1 to complete the flush. + */ + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); + rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); + } + + iounmap(lmt_map_base); + return 0; +} + +static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) +{ + return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; +} + +static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + u64 iova, u64 *lmt_addr) +{ + u64 pa, val, pf; + int err; + + if (!iova) { + dev_err(rvu->dev, "%s Requested null address for translation\n", __func__); + return -EINVAL; + } + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); + pf = rvu_get_pf(pcifunc) & 0x1F; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); + + err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false); + if (err) { + dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__); + return err; + } + val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS); + if (val & ~0x1ULL) { + dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val); + return -EIO; + } + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] + * PA[11:0] = IOVA[11:0] + */ + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa &= GENMASK_ULL(39, 0); + *lmt_addr = (pa << 12) | (iova & 0xFFF); + + return 0; +} + +static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err = 0; + u64 val; + + /* Read the current lmt addr of pcifunc */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + + /* Storing the secondary's lmt base address as this needs to be + * reverted in FLR. Also making sure this default value doesn't + * get overwritten on multiple calls to this mailbox.
+ */ + if (!pfvf->lmt_base_addr) + pfvf->lmt_base_addr = val; + + /* Update the LMT table with new addr */ + err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + return 0; +} + +int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, + struct lmtst_tbl_setup_req *req, + struct msg_rsp *rsp) +{ + u64 lmt_addr, val; + u32 pri_tbl_idx; + int err = 0; + + /* Check if PF_FUNC wants to use its own local memory as LMTLINE + * region; if so, convert that IOVA to a physical address and + * populate the LMT table with that address + */ + if (req->use_local_lmt_region) { + err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc, + req->lmt_iova, &lmt_addr); + if (err < 0) + return err; + + /* Update the lmt addr for this PFFUNC in the LMT table */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr); + if (err) + return err; + } + + /* Reconfiguring the lmtst map table in lmt region shared mode, i.e. + * making multiple PF_FUNCs share an LMTLINE region: the primary/base + * pcifunc (which is passed as an argument to the mailbox) is the one + * whose lmt base address will be shared among the other secondary + * pcifuncs (the ones calling this mailbox). + */ + if (req->base_pcifunc) { + /* Calculating the LMT table index equivalent to the primary + * pcifunc. + */ + pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); + + /* Read the base lmt addr of the primary pcifunc */ + err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, + LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + pri_tbl_idx, err); + return err; + } + + /* Update the base lmt addr of the secondary with the + * primary's base lmt addr. + */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val); + if (err) + return err; + } + + return 0; +} + +/* Resetting the lmtst map table to the original base addresses */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err; + + if (is_rvu_otx2(rvu)) + return; + + if (pfvf->lmt_base_addr) { + /* This corresponds to lmt map table index */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + /* Reverting the original lmt base addr for the respective + * pcifunc.
+ */ + err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } +} + int rvu_set_channels_base(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 3cc3c6fd1d..370d4ca1e5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) { struct dentry *current_dir; - int err, lmac_id; char *buf; current_dir = filp->file->f_path.dentry->d_parent; @@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) if (!buf) return -EINVAL; - err = kstrtoint(buf + 1, 10, &lmac_id); - if (!err) { - err = cgx_print_stats(filp, lmac_id); - if (err) - return err; - } + return kstrtoint(buf + 1, 10, lmac_id); +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + int lmac_id, err; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_stats(filp, lmac_id); + return err; } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); +static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) +{ + struct pci_dev *pdev = NULL; + void *cgxd = s->private; + char *bcast, *mcast; + u16 index, domain; + u8 dmac[ETH_ALEN]; + struct rvu *rvu; + u64 cfg, mac; + int pf; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + domain = 2; + + pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); + if (!pdev) + return 0; + + cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); + bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; + mcast = cfg & CGX_DMAC_MCAST_MODE ? 
"ACCEPT" : "REJECT"; + + seq_puts(s, + "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); + seq_printf(s, "%s PF%d %9s %9s", + dev_name(&pdev->dev), pf, bcast, mcast); + if (cfg & CGX_DMAC_CAM_ACCEPT) + seq_printf(s, "%12s\n\n", "UNICAST"); + else + seq_printf(s, "%16s\n\n", "PROMISCUOUS"); + + seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); + + for (index = 0 ; index < 32 ; index++) { + cfg = cgx_read_dmac_entry(cgxd, index); + /* Display enabled dmac entries associated with current lmac */ + if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && + FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { + mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); + u64_to_ether_addr(mac, dmac); + seq_printf(s, "%7d %pM\n", index, dmac); + } + } + + return 0; +} + +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +{ + int err, lmac_id; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_dmac_flt(filp, lmac_id); + + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); + static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; @@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_stat_fops); + debugfs_create_file("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_dmac_flt_fops); } } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d6f8210652..aeae377044 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -346,6 +346,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + + /* Disable DMAC filters used */ + rvu_cgx_disable_dmac_entries(rvu, pcifunc); } int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 76837d5e19..8b01ef6e2c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -49,6 +49,11 @@ #define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) #define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) #define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) +#define RVU_AF_SMMU_ADDR_REQ (0x6000) +#define RVU_AF_SMMU_TXN_REQ (0x6008) +#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) +#define RVU_AF_SMMU_ADDR_TLN (0x6018) +#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -692,4 +697,9 @@ #define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) #define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) +/* APR */ +#define APR_AF_LMT_CFG (0x000ull) +#define APR_AF_LMT_MAP_BASE (0x008ull) +#define APR_AF_LMT_CTL (0x010ull) + #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 14aa8e37ea..5bbe6727d1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -35,7 +35,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NPA0 = 0xeULL, BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, - BLK_COUNT = 0x12ULL, + BLKADDR_APR = 0x16ULL, + BLK_COUNT = 0x17ULL, }; /* RVU Block Type Enumeration */ diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 457c94793e..3254b02205 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 1b08896b46..184de94662 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = { .refill_pool_ptrs = cn10k_refill_pool_ptrs, }; -int cn10k_pf_lmtst_init(struct otx2_nic *pf) +int cn10k_lmtst_init(struct otx2_nic *pfvf) { - int size, num_lines; - u64 base; - if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { - pf->hw_ops = &otx2_hw_ops; + struct lmtst_tbl_setup_req *req; + int qcount, err; + + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; return 0; } - pf->hw_ops = &cn10k_hw_ops; - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - (MBOX_SIZE * (pf->total_vfs + 1)); + pfvf->hw_ops = &cn10k_hw_ops; + qcount = pfvf->hw.max_queues; + /* LMTST lines allocation + * qcount = num_online_cpus(); + * NPA = TX + RX + XDP. + * NIX = TX * 32 (For Burst SQE flush). + */ + pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32); + pfvf->npa_lmt_lines = qcount * 3; + pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE; - size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) - - (MBOX_SIZE * (pf->total_vfs + 1)); - - pf->hw.lmt_base = ioremap(base, size); - - if (!pf->hw.lmt_base) { - dev_err(pf->dev, "Unable to map PF LMTST region\n"); + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } - /* FIXME: Get the num of LMTST lines from LMT table */ - pf->tot_lmt_lines = size / LMT_LINE_SIZE; - num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) / - pf->hw.tx_queues; - /* Number of LMT lines per SQ queues */ - pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; + req->use_local_lmt_region = true; + + err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines, + LMT_LINE_SIZE); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base; + req->lmt_iova = (u64)pfvf->dync_lmt->iova; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); - pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE; return 0; } - -int cn10k_vf_lmtst_init(struct otx2_nic *vf) -{ - int size, num_lines; - - if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) { - vf->hw_ops = &otx2_hw_ops; - return 0; - } - - vf->hw_ops = &cn10k_hw_ops; - size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM); - vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev, - PCI_MBOX_BAR_NUM), - size); - if (!vf->hw.lmt_base) { - dev_err(vf->dev, "Unable to map VF LMTST region\n"); - return -ENOMEM; - } - - vf->tot_lmt_lines = size / LMT_LINE_SIZE; - /* LMTST lines per SQ */ - num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) / - vf->hw.tx_queues; - vf->nix_lmt_lines = num_lines > 32 ? 
32 : num_lines; - vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE; - return 0; -} -EXPORT_SYMBOL(cn10k_vf_lmtst_init); +EXPORT_SYMBOL(cn10k_lmtst_init); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { @@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) struct otx2_snd_queue *sq; sq = &pfvf->qset.sq[qidx]; - sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base + + sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base + (qidx * pfvf->nix_lmt_size)); + sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE); + /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) @@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { - struct otx2_nic *pfvf = dev; - int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines); u64 val = 0, tar_addr = 0; /* FIXME: val[0:10] LMT_ID. * [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first. */ - val = (lmt_id & 0x7FF); + val = (sq->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b. diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 71292a4cf1..1a1ae33447 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -12,8 +12,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_pf_lmtst_init(struct otx2_nic *pf); -int cn10k_vf_lmtst_init(struct otx2_nic *vf); +int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index cf7875d51d..7cccd802c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) /* update dmac field in vlan offload rule */ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); + /* update dmac address in ntuple and DMAC filter list */ + if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_update_pfmac_flow(pfvf); } else { return -EPERM; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 234b330f31..8fd58cd07f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -218,8 +218,8 @@ struct otx2_hw { unsigned long cap_flag; #define LMT_LINE_SIZE 128 -#define NIX_LMTID_BASE 72 /* RX + TX + XDP */ - void __iomem *lmt_base; +#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */ + u64 *lmt_base; u64 *npa_lmt_base; u64 *nix_lmt_base; }; @@ -288,6 +288,9 @@ struct otx2_flow_config { u16 tc_flower_offset; u16 ntuple_max_flows; u16 tc_max_flows; + u8 dmacflt_max_flows; + u8 *bmap_to_dmacindex; + unsigned long dmacflt_bmap; struct list_head flow_list; }; @@ -329,6 +332,7 @@ 
struct otx2_nic { #define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11) #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) +#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) u64 flags; struct otx2_qset qset; @@ -363,8 +367,9 @@ struct otx2_nic { /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ int nix_blkaddr; /* LMTST Lines info */ + struct qmem *dync_lmt; u16 tot_lmt_lines; - u16 nix_lmt_lines; + u16 npa_lmt_lines; u32 nix_lmt_size; struct otx2_ptp *ptp; @@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +/* CGX/RPM DMAC filters support */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c new file mode 100644 index 0000000000..383a6b5cb6 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physical Function ethernet driver + * + * Copyright (C) 2021 Marvell. + */ + +#include "otx2_common.h" + +static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, + u8 *dmac_index) +{ + struct cgx_mac_addr_add_req *req; + struct cgx_mac_addr_add_rsp *rsp; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + err = otx2_sync_mbox_msg(&pf->mbox); + + if (!err) { + rsp = (struct cgx_mac_addr_add_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + *dmac_index = rsp->index; + } + + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) +{ + struct cgx_mac_addr_set_or_get *req; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) +{ + u8 *dmacindex; + + /* Store dmacindex returned by CGX/RPM driver which will + * be used for macaddr update/remove + */ + dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_add_pfmac(pf); + else + return otx2_dmacflt_do_add(pf, mac, dmacindex); +} + +static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, + u8 dmac_index) +{ + struct cgx_mac_addr_del_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->index = dmac_index; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return err; +} + +static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) +{ + struct msg_req
*req; + int err; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, + u8 bit_pos) +{ + u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_remove_pfmac(pf); + else + return otx2_dmacflt_do_remove(pf, mac, dmacindex); +} + +/* CGX/RPM blocks support max unicast entries of 32. + * On a typical configuration the MAC block is associated + * with 4 lmacs, so each lmac will have 8 dmac entries + */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) +{ + struct cgx_max_dmac_entries_get_rsp *rsp; + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + rsp = (struct cgx_max_dmac_entries_get_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); + pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; + +out: + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) +{ + struct cgx_mac_addr_update_req *req; + int rc; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox); + + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + rc = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 8c97106bdd..4d9de52580 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -18,6 +18,12 @@ struct otx2_flow { bool is_vf; u8 rss_ctx_id; int vf; + bool dmac_filter; +}; + +enum dmac_req { + DMAC_ADDR_UPDATE, + DMAC_ADDR_DEL }; static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg) @@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) if (!pf->mac_table) return -ENOMEM; + otx2_dmacflt_get_max_cnt(pf); + + /* DMAC filters are not allocated */ + if (!pf->flow_cfg->dmacflt_max_flows) + return 0; + + pf->flow_cfg->bmap_to_dmacindex = + devm_kzalloc(pf->dev, sizeof(u8) * + pf->flow_cfg->dmacflt_max_flows, + GFP_KERNEL); + + if (!pf->flow_cfg->bmap_to_dmacindex) + return -ENOMEM; + + pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT; + return 0; } @@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); + if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(netdev, + "Add %pM to CGX/RPM DMAC filters list as well\n", + mac); + return otx2_do_add_macfilter(pf, mac); } @@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) list_add(&flow->list, head); } +static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) +{ + if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows || + bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) + return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows; + else + return
flow_cfg->ntuple_max_flows; +} + int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) { struct otx2_flow *iter; - if (location >= pfvf->flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(pfvf->flow_cfg)) return -EINVAL; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { @@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int idx = 0; int err = 0; - nfc->data = pfvf->flow_cfg->ntuple_max_flows; + nfc->data = otx2_get_maxflows(pfvf->flow_cfg); while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) @@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return 0; } +static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf, + struct ethtool_rx_flow_spec *fsp) +{ + struct ethhdr *eth_mask = &fsp->m_u.ether_spec; + struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; + u64 ring_cookie = fsp->ring_cookie; + u32 flow_type; + + if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)) + return false; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + + /* CGX/RPM block dmac filtering configured for white listing + * check for action other than DROP + */ + if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC && + !ethtool_get_flow_spec_ring_vf(ring_cookie)) { + if (is_zero_ether_addr(eth_mask->h_dest) && + is_valid_ether_addr(eth_hdr->h_dest)) + return true; + } + + return false; +} + static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) { u64 ring_cookie = flow->flow_spec.ring_cookie; @@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) return err; } +static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, + struct otx2_flow *flow) +{ + struct otx2_flow *pf_mac; + struct ethhdr *eth_hdr; + + pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL); + if (!pf_mac) + return -ENOMEM; + + pf_mac->entry = 0; + pf_mac->dmac_filter = true; + pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + memcpy(&pf_mac->flow_spec, &flow->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + pf_mac->flow_spec.location = pf_mac->location; + + /* Copy PF mac address */ + eth_hdr = &pf_mac->flow_spec.h_u.ether_spec; + ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); + + /* Install DMAC filter with PF mac address */ + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0); + + otx2_add_flow_to_list(pfvf, pf_mac); + pfvf->flow_cfg->nr_flows++; + set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + + return 0; +} + int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct otx2_flow *flow; + struct ethhdr *eth_hdr; bool new = false; + int err = 0; u32 ring; - int err; ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) @@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; - if (fsp->location >= flow_cfg->ntuple_max_flows) + if (fsp->location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, fsp->location); if (!flow) { - flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (!flow) return -ENOMEM; flow->location = fsp->location; - flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -852,7 +947,54 @@ int 
otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (fsp->flow_type & FLOW_RSS) flow->rss_ctx_id = nfc->rss_context; - err = otx2_add_flow_msg(pfvf, flow); + if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) { + eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* Sync dmac filter table with updated fields */ + if (flow->dmac_filter) + return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, + flow->entry); + + if (bitmap_full(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) { + netdev_warn(pfvf->netdev, + "Can't insert the rule %d as max allowed dmac filters are %d\n", + flow->location + + flow_cfg->dmacflt_max_flows, + flow_cfg->dmacflt_max_flows); + err = -EINVAL; + if (new) + kfree(flow); + return err; + } + + /* Install PF mac address to DMAC filter list */ + if (!test_bit(0, &flow_cfg->dmacflt_bmap)) + otx2_add_flow_with_pfmac(pfvf, flow); + + flow->dmac_filter = true; + flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows); + fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + flow->flow_spec.location = fsp->location; + flow->location = fsp->location; + + set_bit(flow->entry, &flow_cfg->dmacflt_bmap); + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); + + } else { + if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + netdev_warn(pfvf->netdev, + "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", + flow->location, + flow_cfg->ntuple_max_flows - 1); + err = -EINVAL; + } else { + flow->entry = flow_cfg->flow_ent[flow->location]; + err = otx2_add_flow_msg(pfvf, flow); + } + } + if (err) { if (new) kfree(flow); @@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) return err; } +static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + bool found = false; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->dmac_filter && iter->entry == 0) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + if (req == DMAC_ADDR_DEL) { + otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + 0); + clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + found = true; + } else { + ether_addr_copy(eth_hdr->h_dest, + pfvf->netdev->dev_addr); + otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); + } + break; + } + } + + if (found) { + list_del(&iter->list); + kfree(iter); + pfvf->flow_cfg->nr_flows--; + } +} + int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_flow *flow; int err; - if (location >= flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, location); if (!flow) return -ENOENT; - err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (flow->dmac_filter) { + struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* user not allowed to remove dmac filter with interface mac */ + if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest)) + return -EPERM; + + err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + flow->entry); + clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); + /* If all dmac filters are removed delete macfilter with + * interface mac address and configure CGX/RPM block in + * promiscuous mode + */ + if (bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows) == 1) + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); + } else { + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + } + if (err) return err; @@ -1100,3 +1292,22 @@ int 
otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) mutex_unlock(&pf->mbox.lock); return rsp_hdr->rc; } + +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + + list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { + if (iter->dmac_filter) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + otx2_dmacflt_add(pf, eth_hdr->h_dest, + iter->entry); + } + } +} + +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf) +{ + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 59912f7341..f300b807a8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) struct msg_req *msg; int err; + if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(pf->netdev, + "CGX/RPM internal loopback might not work as DMAC filters are active\n"); + mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); @@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev) if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { /* Reserve LMT lines for NPA AURA batch free */ - pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base; + pf->hw.npa_lmt_base = pf->hw.lmt_base; /* Reserve LMT lines for NIX TX */ - pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base + - (NIX_LMTID_BASE * LMT_LINE_SIZE)); + pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base + + (pf->npa_lmt_lines * LMT_LINE_SIZE)); } err = otx2_init_hw_resources(pf); @@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev) /* Restore pause frame settings */ otx2_config_pause_frm(pf); + /* Install DMAC Filters */ + if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_reinstall_flows(pf); + err = otx2_rxtx_enable(pf, true); if (err) goto err_tx_stop_queues; @@ -2526,7 +2535,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_pf_lmtst_init(pf); + err = cn10k_lmtst_init(pf); if (err) goto err_detach_rsrc; @@ -2630,8 +2639,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); err_disable_mbox_intr: otx2_disable_mbox_intr(pf); @@ -2772,9 +2781,8 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); - if (pf->hw.lmt_base) - iounmap(pf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); pci_free_irq_vectors(pf->pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 905fc02a7d..972b202b98 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, struct otx2_nic *priv; u32 burst, mark = 0; u8 nr_police = 0; - bool pps; + bool pps = false; u64 rate; int i; diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 52486c1f09..2f144e2cf4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -83,6 +83,7 @@ struct otx2_snd_queue { u16 num_sqbs; u16 sqe_thresh; u8 sqe_per_sqb; + u32 lmt_id; u64 io_addr; u64 *aura_fc_addr; u64 *lmt_addr; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 13a908f75b..a8bee5aefe 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_vf_lmtst_init(vf); + err = cn10k_lmtst_init(vf); if (err) goto err_detach_rsrc; @@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); err_disable_mbox_intr: otx2vf_disable_mbox_intr(vf); @@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev) destroy_workqueue(vf->otx2_wq); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); - - if (vf->hw.lmt_base) - iounmap(vf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2vf_vfaf_mbox_destroy(vf); pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index a80419d8d4..ac403d43c7 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -2,6 +2,7 @@ config SPARX5_SWITCH tristate "Sparx5 switch driver" depends on NET_SWITCHDEV depends on HAS_IOMEM + depends on OF select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5249b64f4f..49def6934c 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); ret = register_netdev(ndev); - if (ret) { - free_netdev(ndev); + if (ret) goto init_fail; - } netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", __func__, ndev->irq, ndev->dev_addr); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 3e89e34f86..e9d260d84b 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev, } static int ocelot_netdevice_changeupper(struct net_device *dev, + struct net_device *brport_dev, struct netdev_notifier_changeupper_info *info) { struct netlink_ext_ack *extack; @@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - err = ocelot_netdevice_bridge_join(dev, dev, + err = ocelot_netdevice_bridge_join(dev, brport_dev, info->upper_dev, extack); else - err = ocelot_netdevice_bridge_leave(dev, dev, + err = ocelot_netdevice_bridge_leave(dev, brport_dev, info->upper_dev); } if 
(netif_is_lag_master(info->upper_dev)) { @@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, if (ocelot_port->bond != dev) return NOTIFY_OK; - err = ocelot_netdevice_changeupper(lower, info); + err = ocelot_netdevice_changeupper(lower, dev, info); if (err) return notifier_from_errno(err); } @@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct netdev_notifier_changeupper_info *info = ptr; if (ocelot_netdevice_dev_check(dev)) - return ocelot_netdevice_changeupper(dev, info); + return ocelot_netdevice_changeupper(dev, dev, info); if (netif_is_lag_master(dev)) return ocelot_netdevice_lag_changeupper(dev, info); diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 273d529d43..062bb2db68 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) nfp_fl_ct_clean_flow_entry(ct_entry); kfree(ct_map_ent); - /* If this is the last pre_ct_rule it means that it is - * very likely that the nft table will be cleaned up next, - * as this happens on the removal of the last act_ct flow. - * However we cannot deregister the callback on the removal - * of the last nft flow as this runs into a deadlock situation. - * So deregister the callback on removal of the last pre_ct flow - * and remove any remaining nft flow entries. We also cannot - * save this state and delete the callback later since the - * nft table would already have been freed at that time. - */ if (!zt->pre_ct_count) { - nf_flow_table_offload_del_cb(zt->nft, - nfp_fl_ct_handle_nft_flow, - zt); zt->nft = NULL; nfp_fl_ct_clean_nft_entries(zt); } @@ -1172,6 +1159,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) nfp_ct_map_params); nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry); kfree(ct_map_ent); + break; default: break; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 8543bf3c34..ad655f0a49 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev) put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); - free_netdev(netdev); if (adpt->phy.digital) iounmap(adpt->phy.digital); iounmap(adpt->phy.base); + free_netdev(netdev); + return 0; } diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index a3ca406a35..e5b0d795c3 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * maximum size. 
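The moxart and emac hunks above are the same class of fix that recurs below for tlan and defza: everything reachable through netdev_priv() lives inside the net_device allocation, so free_netdev() must be the very last step of the teardown path, and must not run twice on a failure path that already frees. A minimal sketch of the safe ordering, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct priv { int irq; };
struct net_device { struct priv p; };

/* netdev_priv()-style accessor: priv storage is inside the netdev */
static struct priv *netdev_priv(struct net_device *ndev) { return &ndev->p; }

static void dev_remove(struct net_device *ndev)
{
	struct priv *priv = netdev_priv(ndev);

	printf("releasing irq %d\n", priv->irq);  /* last use of priv... */
	free(ndev);                  /* ...so free_netdev() comes after */
}

int main(void)
{
	struct net_device *ndev = calloc(1, sizeof(*ndev));

	netdev_priv(ndev)->irq = 42;
	dev_remove(ndev);
	return 0;
}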
*/ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); + tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL); n_xdp_tx = num_possible_cpus(); n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev); @@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", n_xdp_ev, n_channels, max_channels); + netif_err(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will not work on this interface"); efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; @@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", n_xdp_tx, n_channels, efx->max_vis); + netif_err(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will not work on this interface"); efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; } else { efx->n_xdp_channels = n_xdp_ev; - efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL; + efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, @@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); + /* We may have a few left-over XDP TX * queues owing to xdp_tx_queue_count * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. * We still allocate and probe those * TXQs, but never use them. */ - if (xdp_queue_number < efx->xdp_tx_queue_count) + if (xdp_queue_number < efx->xdp_tx_queue_count) { + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - xdp_queue_number++; + xdp_queue_number++; + } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx) } } } - if (xdp_queue_number) - efx->xdp_tx_queue_count = xdp_queue_number; + WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index e108b0d2bd..4c9a37dd0d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id { struct plat_stmmacenet_data *plat; struct stmmac_resources res; - bool mdio = false; - int ret, i; struct device_node *np; + int ret, i, phy_mode; + bool mdio = false; np = dev_of_node(&pdev->dev); @@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (plat->bus_id < 0) plat->bus_id = pci_dev_id(pdev); - plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) dev_err(&pdev->dev, "phy_mode not found\n"); + plat->phy_interface = phy_mode; plat->interface = PHY_INTERFACE_MODE_GMII; pci_set_master(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h 
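The sfc change above caps tx_per_ev at EFX_MAX_TXQ_PER_CHANNEL and then stores the capped value in xdp_tx_per_channel, so the per-channel bookkeeping matches what was actually provisioned. A small arithmetic sketch with made-up sizes (the real constants are hardware dependent):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values only; the real ones depend on the NIC. */
	unsigned int max_evq_size = 8192;	/* EFX_MAX_EVQ_SIZE stand-in */
	unsigned int txq_max_ent = 1024;	/* EFX_TXQ_MAX_ENT(efx) stand-in */
	unsigned int max_txq_per_channel = 4;	/* EFX_MAX_TXQ_PER_CHANNEL stand-in */
	unsigned int n_cpus = 14;		/* num_possible_cpus() stand-in */

	unsigned int tx_per_ev = max_evq_size / txq_max_ent;	/* 8 uncapped */

	if (tx_per_ev > max_txq_per_channel)	/* the min() the patch adds */
		tx_per_ev = max_txq_per_channel;

	unsigned int n_xdp_tx = n_cpus;
	unsigned int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	/* 4 TXQs per event queue -> 14 CPUs need 4 XDP channels */
	printf("tx_per_ev=%u n_xdp_ev=%u\n", tx_per_ev, n_xdp_ev);
	return 0;
}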
b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e735134e84..fcdb1d2038 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time); #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d9d6ecf8c..7b8404a215 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev) priv->plat->rx_queues_to_use, false); stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); } priv->speed = SPEED_UNKNOWN; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 072eff8079..5ca710844c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + int phy_mode; void *ret; int rc; @@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) eth_zero_addr(mac); } - plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) - return ERR_PTR(plat->phy_interface); + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) + return ERR_PTR(phy_mode); + plat->phy_interface = phy_mode; plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) plat->interface = plat->phy_interface; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 4e86cdf2bc..580cc03553 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) u32 sec, nsec; u32 quotient, reminder; int neg_adj = 0; - bool xmac; + bool xmac, est_rst = false; + int ret; xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; @@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) sec = quotient; nsec = reminder; + /* If EST is enabled, disable it before adjusting the PTP time. */ + if (priv->plat->est && priv->plat->est->enable) { + est_rst = true; + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + spin_lock_irqsave(&priv->ptp_lock, flags); stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); spin_unlock_irqrestore(&priv->ptp_lock, flags); + /* Calculate the new basetime and re-configure EST after the PTP time adjustment.
*/ + if (est_rst) { + struct timespec64 current_time, time; + ktime_t current_time_ns, basetime; + u64 cycle_time; + + mutex_lock(&priv->plat->est->lock); + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time); + current_time_ns = timespec64_to_ktime(current_time); + time.tv_nsec = priv->plat->est->btr_reserve[0]; + time.tv_sec = priv->plat->est->btr_reserve[1]; + basetime = timespec64_to_ktime(time); + cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC + + priv->plat->est->ctr[0]; + time = stmmac_calc_tas_basetime(basetime, + current_time_ns, + cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + priv->plat->est->enable = true; + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) + netdev_err(priv->dev, "failed to configure EST\n"); + } + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 92dab609d4..4f3b6437b1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv, return ret; } +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time) +{ + struct timespec64 time; + + if (ktime_after(old_base_time, current_time)) { + time = ktime_to_timespec64(old_base_time); + } else { + s64 n; + ktime_t base_time; + + n = div64_s64(ktime_sub_ns(current_time, old_base_time), + cycle_time); + base_time = ktime_add_ns(old_base_time, + (n + 1) * cycle_time); + + time = ktime_to_timespec64(base_time); + } + + return time; +} + static int tc_setup_taprio(struct stmmac_priv *priv, struct tc_taprio_qopt_offload *qopt) { u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; struct plat_stmmacenet_data *plat = priv->plat; - struct timespec64 time, current_time; + struct timespec64 time, current_time, qopt_time; ktime_t current_time_ns; bool fpe = false; int i, ret = 0; @@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv, GFP_KERNEL); if (!plat->est) return -ENOMEM; + + mutex_init(&priv->plat->est->lock); } else { memset(plat->est, 0, sizeof(*plat->est)); } size = qopt->num_entries; + mutex_lock(&priv->plat->est->lock); priv->plat->est->gcl_size = size; priv->plat->est->enable = qopt->enable; + mutex_unlock(&priv->plat->est->lock); for (i = 0; i < size; i++) { s64 delta_ns = qopt->entries[i].interval; @@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv, priv->plat->est->gcl[i] = delta_ns | (gates << wid); } + mutex_lock(&priv->plat->est->lock); /* Adjust for real system time */ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time); current_time_ns = timespec64_to_ktime(current_time); - if (ktime_after(qopt->base_time, current_time_ns)) { - time = ktime_to_timespec64(qopt->base_time); - } else { - ktime_t base_time; - s64 n; - - n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time), - qopt->cycle_time); - base_time = ktime_add_ns(qopt->base_time, - (n + 1) * qopt->cycle_time); - - time = ktime_to_timespec64(base_time); - } + time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns, + qopt->cycle_time); priv->plat->est->btr[0] = (u32)time.tv_nsec; priv->plat->est->btr[1] = (u32)time.tv_sec; + qopt_time = ktime_to_timespec64(qopt->base_time); + priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; +
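stmmac_calc_tas_basetime(), factored out above, exists so both tc_setup_taprio() and the PTP adjust-time path compute the same thing: if the requested base time is already in the past, roll it forward by a whole number of cycle times so the schedule starts at the next future cycle boundary. A standalone sketch of that arithmetic with example values:

#include <stdint.h>
#include <stdio.h>

/* Roll base_ns forward by whole cycles until it lies in the future,
 * mirroring the logic of stmmac_calc_tas_basetime(). */
static int64_t calc_basetime(int64_t base_ns, int64_t now_ns, int64_t cycle_ns)
{
	if (base_ns > now_ns)
		return base_ns;

	int64_t n = (now_ns - base_ns) / cycle_ns;	/* whole past cycles */

	return base_ns + (n + 1) * cycle_ns;		/* next future edge */
}

int main(void)
{
	/* base 1s, cycle 250ms, now 2.1s -> next launch at 2.25s */
	printf("%lld\n", (long long)calc_basetime(1000000000LL,
						  2100000000LL,
						  250000000LL));
	return 0;
}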
priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec; + ctr = qopt->cycle_time; priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); priv->plat->est->ctr[1] = (u32)ctr; - if (fpe && !priv->dma_cap.fpesel) + if (fpe && !priv->dma_cap.fpesel) { + mutex_unlock(&priv->plat->est->lock); return -EOPNOTSUPP; + } /* Actual FPE register configuration will be done after FPE handshake * is success. @@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); if (ret) { netdev_err(priv->dev, "failed to configure EST\n"); goto disable; @@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return 0; disable: + mutex_lock(&priv->plat->est->lock); priv->plat->est->enable = false; stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 0b2ce4bdc2..e0cb713193 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); #endif - free_netdev(dev); - cancel_work_sync(&priv->tlan_tqueue); + free_netdev(dev); } static void tlan_start(struct net_device *dev) diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 14f07050b6..0de2c4552f 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1504,9 +1504,8 @@ static int fza_probe(struct device *bdev) release_mem_region(start, len); err_out_kfree: - free_netdev(dev); - pr_err("%s: initialization failure, aborting!\n", fp->name); + free_netdev(dev); return ret; } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 3811f1bde8..b80ed2ffd4 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { const char aes_gcm_name[] = "rfc4106(gcm(aes))"; - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) u16 sa_idx; int ret; - dev = xs->xso.dev; + dev = xs->xso.real_dev; ns = netdev_priv(dev); ipsec = &ns->ipsec; @@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) static void nsim_ipsec_del_sa(struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct netdevsim *ns = netdev_priv(xs->xso.real_dev); struct nsim_ipsec *ipsec = &ns->ipsec; u16 sa_idx; @@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs) static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct netdevsim *ns = netdev_priv(xs->xso.real_dev); struct nsim_ipsec *ipsec = &ns->ipsec; ipsec->ok++; diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index bbbc6ac8fa..53a4334428 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -78,6 +78,11 @@ enum { /* Temperature read register (88E2110 only) */ MV_PCS_TEMP = 0x8042, + /* Number of ports on the device */ + MV_PCS_PORT_INFO = 0xd00d, + MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380, + MV_PCS_PORT_INFO_NPORTS_SHIFT = 7, + /* These 
registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. @@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = { #endif }; +static int mv3310_get_number_of_ports(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO); + if (ret < 0) + return ret; + + ret &= MV_PCS_PORT_INFO_NPORTS_MASK; + ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT; + + return ret + 1; +} + +static int mv3310_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 1; +} + +static int mv3340_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 4; +} + static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g) { int val; @@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = MARVELL_PHY_ID_88X3310, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3310_match_phy_device, .name = "mv88x3310", .driver_data = &mv3310_type, .get_features = mv3310_get_features, @@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = { .set_loopback = genphy_c45_loopback, }, { - .phy_id = MARVELL_PHY_ID_88X3340, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id = MARVELL_PHY_ID_88X3310, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3340_match_phy_device, .name = "mv88x3340", .driver_data = &mv3340_type, .get_features = mv3310_get_features, @@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK }, - { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK }, + { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK }, { }, }; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index aec97b021a..2c11521642 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev) return ret; } + phy_suspend(priv->phydev); priv->phydev->mac_managed_pm = 1; phy_attached_info(priv->phydev); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8a58a2f013..56c3f85190 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1771,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, { struct scatterlist *sgs[4], hdr, stat; unsigned out_num = 0, tmp; + int ret; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); @@ -1790,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); - virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + if (ret < 0) { + dev_warn(&vi->vdev->dev, + "Failed to add sgs for command vq: %d\n.", ret); + return false; + } if (unlikely(!virtqueue_kick(vi->cvq))) return vi->ctrl->status == VIRTIO_NET_OK; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index c0bd9cbc43..1b483cf2b1 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1,7 
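In the marvell10g hunks above, the 88X3310 and 88X3340 entries now share one PHY ID and are told apart at probe time by .match_phy_device callbacks that read a port-count field out of MV_PCS_PORT_INFO. The decode is a plain mask-and-shift; a sketch with a hypothetical register value:

#include <stdio.h>

#define PORT_INFO_NPORTS_MASK  0x0380
#define PORT_INFO_NPORTS_SHIFT 7

/* Decode the ports field the way mv3310_get_number_of_ports() does. */
static int nports(int reg)
{
	return ((reg & PORT_INFO_NPORTS_MASK) >> PORT_INFO_NPORTS_SHIFT) + 1;
}

int main(void)
{
	/* 0x0180: hypothetical register value with the field set to 3,
	 * i.e. a 4-port device, which mv3340_match_phy_device() accepts */
	printf("%d ports\n", nports(0x0180));
	return 0;
}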
+1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -26,6 +26,10 @@ #include "vmxnet3_int.h" +#include +#include + +#define VXLAN_UDP_PORT 8472 struct vmxnet3_stat_desc { char desc[ETH_GSTRING_LEN]; @@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb, if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_proto = 0; + u16 port; + struct udphdr *udph; switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): @@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb, return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } - if (l4_proto != IPPROTO_UDP) + switch (l4_proto) { + case IPPROTO_UDP: + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + /* Check if offloaded port is supported */ + if (port != GENEVE_UDP_PORT && + port != IANA_VXLAN_UDP_PORT && + port != VXLAN_UDP_PORT) { + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + break; + default: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } } return features; } diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 349ca18088..c54fdae950 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_cisco_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_cisco_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_cisco_init); +module_exit(hdlc_cisco_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 72250fe0a1..25e3564ce1 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_fr_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_fr_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_fr_init); +module_exit(hdlc_fr_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 834be2ae3e..b81ecf432a 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_ppp_init(void) { skb_queue_head_init(&tx_queue); register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_ppp_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_ppp_init); +module_exit(hdlc_ppp_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("PPP protocol support for generic HDLC"); diff --git 
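The vmxnet3 features_check() change above keeps checksum/GSO offload for encapsulated packets only when the outer UDP destination port is a tunnel port the device knows how to parse: GENEVE (6081), IANA VXLAN (4789), or the legacy VXLAN port 8472 that the patch defines locally. A minimal sketch of that port gate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENEVE_UDP_PORT     6081
#define IANA_VXLAN_UDP_PORT 4789
#define VXLAN_UDP_PORT      8472	/* legacy default, as in the patch */

/* Keep offloads only for tunnel ports the device can parse. */
static bool tunnel_port_offloadable(uint16_t dport)
{
	switch (dport) {
	case GENEVE_UDP_PORT:
	case IANA_VXLAN_UDP_PORT:
	case VXLAN_UDP_PORT:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d %d\n", tunnel_port_offloadable(4789),
	       tunnel_port_offloadable(4790));
	return 0;
}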
a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 388fcc09b4..54d28496fe 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) } -static int __init mod_init(void) +static int __init hdlc_raw_init(void) { register_hdlc_protocol(&proto); return 0; @@ -98,14 +98,14 @@ static int __init mod_init(void) -static void __exit mod_exit(void) +static void __exit hdlc_raw_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_raw_init); +module_exit(hdlc_raw_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index c70a518b8b..927596276a 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) } -static int __init mod_init(void) +static int __init hdlc_eth_init(void) { register_hdlc_protocol(&proto); return 0; @@ -118,14 +118,14 @@ static int __init mod_init(void) -static void __exit mod_exit(void) +static void __exit hdlc_eth_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_eth_init); +module_exit(hdlc_eth_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index d2bf72bf3b..9b7ebf8bd8 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_x25_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_x25_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_x25_init); +module_exit(hdlc_x25_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("X.25 protocol support for generic HDLC"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 7fd21049ff..63ec140c9c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -389,6 +389,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_WEP104: if (!mvif->wep_sta) return -EOPNOTSUPP; + break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index c2c4dc1968..cd690c64f6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); if (ret) { dev_dbg(dev->mt76.dev, "Firmware is already download\n"); - return -EIO; + goto fw_loaded; } ret = mt7921_load_patch(dev); @@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) return -EIO; } +fw_loaded: mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); #ifdef CONFIG_PM diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c 
b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 46f76e8aae..0a472ce773 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id) return -EIO; } - /* check for the interafce id - * if if_id 1 to 8 then create IP MUX channel sessions. - * To start MUX session from 0 as network interface id would start - * from 1 so map it to if_id = if_id - 1 - */ - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - return ipc_mux_open_session(ipc_imem->mux, if_id - 1); - - return -EINVAL; + return ipc_mux_open_session(ipc_imem->mux, if_id); } /* Release a net link to CP. */ @@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, { if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - ipc_mux_close_session(ipc_imem->mux, if_id - 1); + ipc_mux_close_session(ipc_imem->mux, if_id); } /* Tasklet call to do uplink transfer. */ @@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, goto out; } - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - /* Route the UL packet through IP MUX Layer */ - ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, - if_id - 1, skb); - else - dev_err(ipc_imem->dev, - "invalid if_id %d: ", if_id); + /* Route the UL packet through IP MUX Layer */ + ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb); out: return ret; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index e634ffc6ec..562de27579 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id, /* Pass the packet to the netif layer. 
*/ dest_skb->priority = service_class; - return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1); + return ipc_wwan_receive(wwan, dest_skb, false, if_id); } /* Decode Flow Credit Table in the block */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c index 2229d75292..d12188ffed 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c +++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c @@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent) /* Store the device and event information */ info->dev = dev; - snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent); + snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent); /* Schedule uevent in process context using work queue */ schedule_work(&info->work); diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index c999c64001..b2357ad5d5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, { struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev); struct iosm_wwan *ipc_wwan = priv->ipc_wwan; + unsigned int len = skb->len; int if_id = priv->if_id; int ret; @@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, /* Return code of zero is success */ if (ret == 0) { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += len; ret = NETDEV_TX_OK; } else if (ret == -EBUSY) { ret = NETDEV_TX_BUSY; @@ -140,7 +143,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, ret); dev_kfree_skb_any(skb); - return ret; + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; } /* Ops structure for wwan net link */ @@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) iosm_dev->priv_flags |= IFF_NO_QUEUE; iosm_dev->type = ARPHRD_NONE; + iosm_dev->mtu = ETH_DATA_LEN; iosm_dev->min_mtu = ETH_MIN_MTU; iosm_dev->max_mtu = ETH_MAX_MTU; @@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg, skb->pkt_type = PACKET_HOST; - if (if_id < (IP_MUX_SESSION_START - 1) || - if_id > (IP_MUX_SESSION_END - 1)) { + if (if_id < IP_MUX_SESSION_START || + if_id > IP_MUX_SESSION_END) { ret = -EINVAL; goto free; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d3c5086673..320051f5a3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) wmb(); /* ensure the first interrupt sees the initialization */ } +/* + * Try getting shutdown_lock while setting up IO queues. + */ +static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) +{ + /* + * Give up if the lock is being held by nvme_dev_disable. + */ + if (!mutex_trylock(&dev->shutdown_lock)) + return -ENODEV; + + /* + * Controller is in wrong state, fail early. 
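The ipc_wwan_link_transmit() change above brings the driver in line with the ndo_start_xmit() contract: once the skb has been consumed (sent or freed), the handler must return NETDEV_TX_OK and account the packet or the drop itself; only NETDEV_TX_BUSY asks the stack to requeue, and an errno return is never valid. A userspace sketch of that return-value discipline, with hw_ret standing in for the device handoff result:

#include <errno.h>
#include <stdio.h>

#define NETDEV_TX_OK   0x00
#define NETDEV_TX_BUSY 0x10

struct stats { unsigned long tx_packets, tx_bytes, tx_dropped; };

/* Sketch of the contract the wwan fix implements: the skb is consumed
 * on both success and drop, so both paths return NETDEV_TX_OK. */
static int start_xmit(struct stats *s, unsigned int len, int hw_ret)
{
	if (hw_ret == 0) {
		s->tx_packets++;
		s->tx_bytes += len;	/* len sampled before the handoff */
		return NETDEV_TX_OK;
	}
	if (hw_ret == -EBUSY)
		return NETDEV_TX_BUSY;	/* skb kept, stack will retry */

	/* dev_kfree_skb_any() happened here in the real driver */
	s->tx_dropped++;
	return NETDEV_TX_OK;		/* consumed, never an errno */
}

int main(void)
{
	struct stats s = {0};

	printf("ret=%d\n", start_xmit(&s, 1500, -EIO));
	printf("dropped=%lu\n", s.tx_dropped);
	return 0;
}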
+ */ + if (dev->ctrl.state != NVME_CTRL_CONNECTING) { + mutex_unlock(&dev->shutdown_lock); + return -ENODEV; + } + + return 0; +} + static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) { struct nvme_dev *dev = nvmeq->dev; @@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) goto release_cq; nvmeq->cq_vector = vector; - nvme_init_queue(nvmeq, qid); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + nvme_init_queue(nvmeq, qid); if (!polled) { result = queue_request_irq(nvmeq); if (result < 0) @@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) } set_bit(NVMEQ_ENABLED, &nvmeq->flags); + mutex_unlock(&dev->shutdown_lock); return result; release_sq: dev->online_queues--; + mutex_unlock(&dev->shutdown_lock); adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); @@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (nr_io_queues == 0) return 0; - clear_bit(NVMEQ_ENABLED, &adminq->flags); + /* + * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions + * from set to unset. If there is a window where it is truly freed, + * pci_free_irq_vectors() jumping into this window will crash. + * And take lock to avoid racing with pci_free_irq_vectors() in + * nvme_dev_disable() path. + */ + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); if (dev->cmb_use_sqes) { result = nvme_cmb_qdepth(dev, nr_io_queues, @@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) result = nvme_remap_bar(dev, size); if (!result) break; - if (!--nr_io_queues) - return -ENOMEM; + if (!--nr_io_queues) { + result = -ENOMEM; + goto out_unlock; + } } while (1); adminq->q_db = dev->dbs; retry: /* Deregister the admin queue's interrupt */ - pci_free_irq(pdev, 0, adminq); + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); /* * If we enable msix early due to not intx, disable it again before @@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) pci_free_irq_vectors(pdev); result = nvme_setup_irqs(dev, nr_io_queues); - if (result <= 0) - return -EIO; + if (result <= 0) { + result = -EIO; + goto out_unlock; + } dev->num_vecs = result; result = max(result - 1, 1); @@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ result = queue_request_irq(adminq); if (result) - return result; + goto out_unlock; set_bit(NVMEQ_ENABLED, &adminq->flags); + mutex_unlock(&dev->shutdown_lock); result = nvme_create_io_queues(dev); if (result || dev->online_queues < 2) @@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (dev->online_queues - 1 < dev->max_qid) { nr_io_queues = dev->online_queues - 1; nvme_disable_io_queues(dev); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + nvme_suspend_io_queues(dev); goto retry; } @@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) dev->io_queues[HCTX_TYPE_READ], dev->io_queues[HCTX_TYPE_POLL]); return 0; +out_unlock: + mutex_unlock(&dev->shutdown_lock); + return result; } static void nvme_del_queue_end(struct request *req, blk_status_t error) @@ -2962,7 +3012,6 @@ static void nvme_remove(struct pci_dev *pdev) if (!pci_device_is_present(pdev)) { nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); nvme_dev_disable(dev, true); -
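nvme_setup_io_queues_trylock(), added above, is a back-off pattern: queue setup uses mutex_trylock() on shutdown_lock so it can never sit blocked behind nvme_dev_disable(), and it re-checks the controller state under the lock since teardown may have run in between. A minimal pthread analogue of the same pattern:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
static int ctrl_connecting = 1;

/* Userspace analogue of nvme_setup_io_queues_trylock(): give up rather
 * than block if teardown owns the lock, and re-check state under it. */
static int setup_trylock(void)
{
	if (pthread_mutex_trylock(&shutdown_lock))
		return -ENODEV;		/* teardown in progress */
	if (!ctrl_connecting) {
		pthread_mutex_unlock(&shutdown_lock);
		return -ENODEV;		/* state changed under us */
	}
	return 0;			/* caller unlocks when done */
}

int main(void)
{
	if (!setup_trylock()) {
		puts("setup may touch IRQ resources now");
		pthread_mutex_unlock(&shutdown_lock);
	}
	return 0;
}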
nvme_dev_remove_admin(dev); } flush_work(&dev->ctrl.reset_work); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 12acfe05cd..8cb15ee5b2 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -123,7 +123,6 @@ struct nvme_tcp_ctrl { struct blk_mq_tag_set admin_tag_set; struct sockaddr_storage addr; struct sockaddr_storage src_addr; - struct net_device *ndev; struct nvme_ctrl ctrl; struct work_struct err_work; @@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, } if (opts->mask & NVMF_OPT_HOST_IFACE) { - ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface); - if (!ctrl->ndev) { + if (!__dev_get_by_name(&init_net, opts->host_iface)) { pr_err("invalid interface passed: %s\n", opts->host_iface); ret = -ENODEV; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 3d45ed0157..a6ebdb269f 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -1728,6 +1728,7 @@ static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di) break; case AB8500_FG_CALIB_WAIT: dev_dbg(di->dev, "Calibration WFI\n"); + break; default: break; } @@ -2224,6 +2225,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data) queue_work(di->fg_wq, &di->fg_work); break; } + break; default: break; } diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c index a17849bfac..b72826cf67 100644 --- a/drivers/power/supply/abx500_chargalg.c +++ b/drivers/power/supply/abx500_chargalg.c @@ -1150,6 +1150,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data) default: break; } + break; default: break; } diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index 8673d1743f..28a6fe342d 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -3,7 +3,7 @@ # Makefile for PTP 1588 clock support. 
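The ab8500_fg and abx500_chargalg hunks above, like the mt7921 set_key one earlier, add a bare break before a default: label to close an unintended switch fallthrough of the kind -Wimplicit-fallthrough flags. A tiny sketch of the bug class, with hypothetical states:

#include <stdio.h>

/* The pattern these hunks fix: an inner case that was falling through
 * into default. The added break makes the intent explicit. */
static const char *classify(int state)
{
	switch (state) {
	case 1:
		return "calibrating";
	case 2:
		puts("calibration wait");
		break;		/* previously missing: fell into default */
	default:
		break;
	}
	return "idle";
}

int main(void)
{
	puts(classify(2));
	return 0;
}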
# -ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o +ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index a23a37a4d5..4dfc52e067 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -24,10 +24,11 @@ #define PTP_PPS_EVENT PPS_CAPTUREASSERT #define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC) +struct class *ptp_class; + /* private globals */ static dev_t ptp_devt; -static struct class *ptp_class; static DEFINE_IDA(ptp_clocks_map); @@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + return ptp->info->settime64(ptp->info, tp); } @@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) struct ptp_clock_info *ops; int err = -EOPNOTSUPP; + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ops = ptp->info; if (tx->modes & ADJ_SETOFFSET) { @@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev) ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); kfree(ptp); } @@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, { struct ptp_clock *ptp; int err = 0, index, major = MAJOR(ptp_devt); + size_t size; if (info->n_alarm > PTP_MAX_ALARMS) return ERR_PTR(-EINVAL); @@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, spin_lock_init(&ptp->tsevq.lock); mutex_init(&ptp->tsevq_mux); mutex_init(&ptp->pincfg_mux); + mutex_init(&ptp->n_vclocks_mux); init_waitqueue_head(&ptp->tsev_wq); if (ptp->info->do_aux_work) { @@ -218,7 +232,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pr_err("failed to create ptp aux_worker %d\n", err); goto kworker_err; } - ptp->pps_source->lookup_cookie = ptp; + } + + /* PTP virtual clock is being registered under physical clock */ + if (parent && parent->class && parent->class->name && + strcmp(parent->class->name, "ptp") == 0) + ptp->is_virtual_clock = true; + + if (!ptp->is_virtual_clock) { + ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS; + + size = sizeof(int) * ptp->max_vclocks; + ptp->vclock_index = kzalloc(size, GFP_KERNEL); + if (!ptp->vclock_index) { + err = -ENOMEM; + goto no_mem_for_vclocks; + } } err = ptp_populate_pin_groups(ptp); @@ -238,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pr_err("failed to register pps source\n"); goto no_pps; } + ptp->pps_source->lookup_cookie = ptp; } /* Initialize a new device of our class in our clock structure. 
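Once any virtual clock exists, ptp_clock_settime() and ptp_clock_adjtime() above start refusing to touch the physical clock with -EBUSY, since every vclock's timecounter is layered on the physical clock and needs it free running. A minimal sketch of that guard, with a plain counter standing in for the mutex-protected n_vclocks check:

#include <errno.h>
#include <stdio.h>

static unsigned int n_vclocks;

/* Guard pattern from ptp_clock_settime()/ptp_clock_adjtime(): refuse
 * to step or slew the physical clock while vclocks are layered on it. */
static int phys_clock_settime(long long ns)
{
	if (n_vclocks)
		return -EBUSY;	/* vclocks need a free-running basis */
	printf("set physical clock to %lld\n", ns);
	return 0;
}

int main(void)
{
	n_vclocks = 2;
	printf("%d\n", phys_clock_settime(0));	/* prints -16 (EBUSY) */
	return 0;
}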
*/ @@ -265,11 +295,14 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, no_pps: ptp_cleanup_pin_groups(ptp); no_pin_groups: + kfree(ptp->vclock_index); +no_mem_for_vclocks: if (ptp->kworker) kthread_destroy_worker(ptp->kworker); kworker_err: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, index); no_slot: kfree(ptp); @@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register); int ptp_clock_unregister(struct ptp_clock *ptp) { + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ptp->defunct = 1; wake_up_interruptible(&ptp->tsev_wq); + kfree(ptp->vclock_index); + if (ptp->kworker) { kthread_cancel_delayed_work_sync(&ptp->aux_work); kthread_destroy_worker(ptp->kworker); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 6b97155148..dba6be4770 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -18,6 +18,7 @@ #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 +#define PTP_DEFAULT_MAX_VCLOCKS 20 struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; @@ -46,6 +47,24 @@ struct ptp_clock { const struct attribute_group *pin_attr_groups[2]; struct kthread_worker *kworker; struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ + bool is_virtual_clock; +}; + +#define info_to_vclock(d) container_of((d), struct ptp_vclock, info) +#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc) +#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work) + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct cyclecounter cc; + struct timecounter tc; + spinlock_t lock; /* protects tc/cc */ }; /* @@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q) return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt; } +/* Check if ptp virtual clock is in use */ +static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) +{ + bool in_use = false; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return true; + + if (!ptp->is_virtual_clock && ptp->n_vclocks) + in_use = true; + + mutex_unlock(&ptp->n_vclocks_mux); + + return in_use; +} + +extern struct class *ptp_class; + /* * see ptp_chardev.c */ @@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[]; int ptp_populate_pin_groups(struct ptp_clock *ptp); void ptp_cleanup_pin_groups(struct ptp_clock *ptp); +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock); +void ptp_vclock_unregister(struct ptp_vclock *vclock); #endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index be076a91e2..b3d96b7472 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -3,6 +3,7 @@ * PTP 1588 clock support - sysfs interface. 
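In the sysfs handlers that follow, unregister_vclock() returns -EINVAL purely to make device_for_each_child_reverse() stop once the requested number of virtual clocks has been deleted (the "For break. Not error." comment). The iterator stops at the first non-zero callback return; a small sketch of that early-stop idiom with hypothetical names:

#include <stdio.h>

/* device_for_each_child_reverse()-style iterator: stops and returns
 * as soon as the callback returns non-zero. */
static int for_each_child(int nchild, int (*fn)(int child, void *data),
			  void *data)
{
	for (int i = nchild - 1; i >= 0; i--) {
		int err = fn(i, data);

		if (err)
			return err;
	}
	return 0;
}

/* Delete children until the budget hits zero, then "fail" to break. */
static int delete_one(int child, void *data)
{
	int *budget = data;

	printf("deleting child %d\n", child);
	if (--(*budget) == 0)
		return -1;	/* for break, not a real error */
	return 0;
}

int main(void)
{
	int budget = 2;

	for_each_child(5, delete_one, &budget);	/* deletes children 4 and 3 */
	return 0;
}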
* * Copyright (C) 2010 OMICRON electronics GmbH + * Copyright 2021 NXP */ #include #include @@ -148,6 +149,159 @@ static ssize_t pps_enable_store(struct device *dev, } static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); +static int unregister_vclock(struct device *dev, void *data) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + struct ptp_vclock *vclock; + u8 *num = data; + + vclock = info_to_vclock(info); + dev_info(dev->parent, "delete virtual clock ptp%d\n", + vclock->clock->index); + + ptp_vclock_unregister(vclock); + (*num)--; + + /* For break. Not error. */ + if (*num == 0) + return -EINVAL; + + return 0; +} + +static ssize_t n_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks); + + mutex_unlock(&ptp->n_vclocks_mux); + + return size; +} + +static ssize_t n_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_vclock *vclock; + int err = -EINVAL; + u32 num, i; + + if (kstrtou32(buf, 0, &num)) + return err; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (num > ptp->max_vclocks) { + dev_err(dev, "max value is %d\n", ptp->max_vclocks); + goto out; + } + + /* Need to create more vclocks */ + if (num > ptp->n_vclocks) { + for (i = 0; i < num - ptp->n_vclocks; i++) { + vclock = ptp_vclock_register(ptp); + if (!vclock) + goto out; + + *(ptp->vclock_index + ptp->n_vclocks + i) = + vclock->clock->index; + + dev_info(dev, "new virtual clock ptp%d\n", + vclock->clock->index); + } + } + + /* Need to delete vclocks */ + if (num < ptp->n_vclocks) { + i = ptp->n_vclocks - num; + device_for_each_child_reverse(dev, &i, + unregister_vclock); + + for (i = 1; i <= ptp->n_vclocks - num; i++) + *(ptp->vclock_index + ptp->n_vclocks - i) = -1; + } + + if (num == 0) + dev_info(dev, "only physical clock in use now\n"); + else + dev_info(dev, "guarantee physical clock free running\n"); + + ptp->n_vclocks = num; + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(n_vclocks); + +static ssize_t max_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks); + + return size; +} + +static ssize_t max_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + unsigned int *vclock_index; + int err = -EINVAL; + size_t size; + u32 max; + + if (kstrtou32(buf, 0, &max) || max == 0) + return -EINVAL; + + if (max == ptp->max_vclocks) + return count; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (max < ptp->n_vclocks) + goto out; + + size = sizeof(int) * max; + vclock_index = kzalloc(size, GFP_KERNEL); + if (!vclock_index) { + err = -ENOMEM; + goto out; + } + + size = sizeof(int) * ptp->n_vclocks; + memcpy(vclock_index, ptp->vclock_index, size); + + kfree(ptp->vclock_index); + ptp->vclock_index = vclock_index; + ptp->max_vclocks = max; + + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + 
mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(max_vclocks); + static struct attribute *ptp_attrs[] = { &dev_attr_clock_name.attr, @@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = { &dev_attr_fifo.attr, &dev_attr_period.attr, &dev_attr_pps_enable.attr, + &dev_attr_n_vclocks.attr, + &dev_attr_max_vclocks.attr, NULL }; @@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj, } else if (attr == &dev_attr_pps_enable.attr) { if (!info->pps) mode = 0; + } else if (attr == &dev_attr_n_vclocks.attr || + attr == &dev_attr_max_vclocks.attr) { + if (ptp->is_virtual_clock) + mode = 0; } return mode; diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c new file mode 100644 index 0000000000..e0f87c5774 --- /dev/null +++ b/drivers/ptp/ptp_vclock.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PTP virtual clock driver + * + * Copyright 2021 NXP + */ +#include +#include "ptp_private.h" + +#define PTP_VCLOCK_CC_SHIFT 31 +#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT) +#define PTP_VCLOCK_FADJ_SHIFT 9 +#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL +#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2) + +static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + s64 adj; + + adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT; + adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR); + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_read(&vclock->tc); + vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj; + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_adjtime(&vclock->tc, delta); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_read(&vclock->tc); + spin_unlock_irqrestore(&vclock->lock, flags); + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int ptp_vclock_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_init(&vclock->tc, &vclock->cc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static long ptp_vclock_refresh(struct ptp_clock_info *ptp) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + struct timespec64 ts; + + ptp_vclock_gettime(&vclock->info, &ts); + + return PTP_VCLOCK_REFRESH_INTERVAL; +} + +static const struct ptp_clock_info ptp_vclock_info = { + .owner = THIS_MODULE, + .name = "ptp virtual clock", + /* The maximum ppb value that long scaled_ppm can support */ + .max_adj = 32767999, + .adjfine = ptp_vclock_adjfine, + .adjtime = ptp_vclock_adjtime, + .gettime64 = ptp_vclock_gettime, + .settime64 = ptp_vclock_settime, + .do_aux_work = ptp_vclock_refresh, +}; + +static u64 ptp_vclock_read(const struct cyclecounter *cc) +{ + struct ptp_vclock *vclock = cc_to_vclock(cc); + struct ptp_clock *ptp = vclock->pclock; + struct timespec64 ts = {}; + + if (ptp->info->gettimex64) + ptp->info->gettimex64(ptp->info, &ts, NULL); + else + 
ptp->info->gettime64(ptp->info, &ts); + + return timespec64_to_ns(&ts); +} + +static const struct cyclecounter ptp_vclock_cc = { + .read = ptp_vclock_read, + .mask = CYCLECOUNTER_MASK(32), + .mult = PTP_VCLOCK_CC_MULT, + .shift = PTP_VCLOCK_CC_SHIFT, +}; + +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) +{ + struct ptp_vclock *vclock; + + vclock = kzalloc(sizeof(*vclock), GFP_KERNEL); + if (!vclock) + return NULL; + + vclock->pclock = pclock; + vclock->info = ptp_vclock_info; + vclock->cc = ptp_vclock_cc; + + snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt", + pclock->index); + + spin_lock_init(&vclock->lock); + + vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); + if (IS_ERR_OR_NULL(vclock->clock)) { + kfree(vclock); + return NULL; + } + + timecounter_init(&vclock->tc, &vclock->cc, 0); + ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); + + return vclock; +} + +void ptp_vclock_unregister(struct ptp_vclock *vclock) +{ + ptp_clock_unregister(vclock->clock); + kfree(vclock); +} + +int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_clock *ptp; + struct device *dev; + int num = 0; + + if (pclock_index < 0) + return num; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return num; + + ptp = dev_get_drvdata(dev); + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) { + put_device(dev); + return num; + } + + *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL); + if (!(*vclock_index)) + goto out; + + memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks); + num = ptp->n_vclocks; +out: + mutex_unlock(&ptp->n_vclocks_mux); + put_device(dev); + return num; +} +EXPORT_SYMBOL(ptp_get_vclocks_index); + +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_vclock *vclock; + struct ptp_clock *ptp; + unsigned long flags; + struct device *dev; + u64 ns; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return; + + ptp = dev_get_drvdata(dev); + if (!ptp->is_virtual_clock) { + put_device(dev); + return; + } + + vclock = info_to_vclock(ptp->info); + + ns = ktime_to_ns(hwtstamps->hwtstamp); + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_cyc2time(&vclock->tc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + put_device(dev); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} +EXPORT_SYMBOL(ptp_convert_timestamp); diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 5537b5f6dd..e157273fd2 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -190,12 +190,9 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (err) - return err; - } + err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; if (!enabled) return berlin_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c index 8a3d781e65..fc3cb7d669 100644 --- a/drivers/pwm/pwm-ep93xx.c +++ b/drivers/pwm/pwm-ep93xx.c @@ -64,6 +64,11 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, int ret; struct ep93xx_pwm 
*ep93xx_pwm = to_ep93xx_pwm(chip); bool enabled = state->enabled; + void __iomem *base = ep93xx_pwm->base; + unsigned long long c; + unsigned long period_cycles; + unsigned long duty_cycles; + unsigned long term; if (state->polarity != pwm->state.polarity) { if (enabled) { @@ -97,58 +102,48 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); - void __iomem *base = ep93xx_pwm->base; - unsigned long long c; - unsigned long period_cycles; - unsigned long duty_cycles; - unsigned long term; - - /* - * The clock needs to be enabled to access the PWM registers. - * Configuration can be changed at any time. - */ - if (!pwm_is_enabled(pwm)) { - ret = clk_prepare_enable(ep93xx_pwm->clk); - if (ret) - return ret; - } - - c = clk_get_rate(ep93xx_pwm->clk); - c *= state->period; - do_div(c, 1000000000); - period_cycles = c; - - c = period_cycles; - c *= state->duty_cycle; - do_div(c, state->period); - duty_cycles = c; - - if (period_cycles < 0x10000 && duty_cycles < 0x10000) { - term = readw(base + EP93XX_PWMx_TERM_COUNT); - - /* Order is important if PWM is running */ - if (period_cycles > term) { - writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); - writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); - } else { - writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); - writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); - } - ret = 0; - } else { - ret = -EINVAL; - } - - if (!pwm_is_enabled(pwm)) - clk_disable_unprepare(ep93xx_pwm->clk); - + /* + * The clock needs to be enabled to access the PWM registers. + * Configuration can be changed at any time. + */ + if (!pwm_is_enabled(pwm)) { + ret = clk_prepare_enable(ep93xx_pwm->clk); if (ret) return ret; } + c = clk_get_rate(ep93xx_pwm->clk); + c *= state->period; + do_div(c, 1000000000); + period_cycles = c; + + c = period_cycles; + c *= state->duty_cycle; + do_div(c, state->period); + duty_cycles = c; + + if (period_cycles < 0x10000 && duty_cycles < 0x10000) { + term = readw(base + EP93XX_PWMx_TERM_COUNT); + + /* Order is important if PWM is running */ + if (period_cycles > term) { + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); + } else { + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); + } + ret = 0; + } else { + ret = -EINVAL; + } + + if (!pwm_is_enabled(pwm)) + clk_disable_unprepare(ep93xx_pwm->clk); + + if (ret) + return ret; + if (!enabled) { ret = clk_prepare_enable(ep93xx_pwm->clk); if (ret) diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index 48c31dac2f..54c7990967 100644 --- a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -177,12 +177,9 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (err) - return err; - } + err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; if (!pwm->state.enabled) return spear_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c index f2a85e8dd9..7004f55bbf 100644 --- a/drivers/pwm/pwm-sprd.c +++ b/drivers/pwm/pwm-sprd.c @@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, 
} } - if (state->period != cstate->period || - state->duty_cycle != cstate->duty_cycle) { - ret = sprd_pwm_config(spc, pwm, state->duty_cycle, - state->period); - if (ret) - return ret; - } + ret = sprd_pwm_config(spc, pwm, state->duty_cycle, + state->period); + if (ret) + return ret; sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1); } else if (cstate->enabled) { diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index dec3f1fb15..35eb19a5a0 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -189,16 +189,13 @@ static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - if (state->period > NSEC_PER_SEC) - return -ERANGE; + if (state->period > NSEC_PER_SEC) + return -ERANGE; - err = ecap_pwm_config(chip, pwm, state->duty_cycle, - state->period, enabled); - if (err) - return err; - } + err = ecap_pwm_config(chip, pwm, state->duty_cycle, + state->period, enabled); + if (err) + return err; if (!enabled) return ecap_pwm_enable(chip, pwm); diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 8abb429233..cc8237afef 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device, case MTSEEK: if (device->required_tapemarks) tape_std_terminate_write(device); - default: - ; } rc = tape_mtop(device, op.mt_op, op.mt_count); diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index b341075397..377e3689d1 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1454,6 +1454,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "normal RX"); + break; default: break; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d308ff744a..f0d6f205c5 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card, if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) return 0; + goto out_inval; default: goto out_inval; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 544efd4c42..b8cd75a872 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev, if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || 0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) || + 0 != (status & ZFCP_STATUS_PORT_LINK_TEST) || 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) || 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) i = sprintf(buf, "unknown\n"); diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 30ed3d2363..6baa9b3636 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2010,7 +2010,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, "request sense complete, result=0x%04x%02x%02x", result, SCpnt->SCp.Message, SCpnt->SCp.Status); - if (result != DID_OK || SCpnt->SCp.Status != GOOD) + if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD) /* * Something went wrong. 
Make sure that we don't * have valid data in the sense buffer that could diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 929a3b043a..3f6f14f0ca 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -488,6 +488,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost_printk(KERN_WARNING, shost, "error handler thread failed to spawn, error = %ld\n", PTR_ERR(shost->ehandler)); + shost->ehandler = NULL; goto fail; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 9f5068f3bc..dd205414e5 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work) break; #else pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); - /* Fall through */ + fallthrough; #endif /* Fall through - only for the #else condition above. */ default: diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 9eceafca59..2dba2b0af1 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -2607,14 +2607,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) goto out; } drv_info->information_length = cpu_to_le32(data_len); - strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); - strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); - drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0; - strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); - drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0; - strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); - strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); - strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date)); + strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); + strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); + strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); + strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); + strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); + strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, + sizeof(drv_info->driver_release_date)); drv_info->driver_capabilities = 0; memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, sizeof(mrioc->driver_info)); diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 0b8802beb7..ec05c42e8e 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -77,7 +77,7 @@ DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); * @attr: device attribute (unused) * @buf: the buffer returned * - * A sysfs 'read only' shost attribute. + * A sysfs 'read-only' shost attribute. 
*/ static ssize_t controller_fatal_error_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -149,7 +149,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev, static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL); /** - * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number + * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number * @cdev: pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned @@ -396,6 +396,7 @@ static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned + * * A sysfs 'read-only' shost attribute. */ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, @@ -430,6 +431,7 @@ static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned + * * A sysfs 'read-only' shost attribute. */ @@ -464,6 +466,7 @@ static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf:the buffer returned + * * A sysfs 'read-only' shost attribute. */ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, @@ -555,13 +558,13 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); /** - ** pm8001_ctl_fatal_log_show - fatal error logging - ** @cdev:pointer to embedded class device - ** @attr: device attribute - ** @buf: the buffer returned - ** - ** A sysfs 'read-only' shost attribute. - **/ + * pm8001_ctl_fatal_log_show - fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -575,13 +578,13 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); /** - ** non_fatal_log_show - non fatal error logging - ** @cdev:pointer to embedded class device - ** @attr: device attribute - ** @buf: the buffer returned - ** - ** A sysfs 'read-only' shost attribute. - **/ + * non_fatal_log_show - non fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t non_fatal_log_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -620,12 +623,13 @@ static ssize_t non_fatal_count_store(struct device *cdev, static DEVICE_ATTR_RW(non_fatal_count); /** - ** pm8001_ctl_gsm_log_show - gsm dump collection - ** @cdev:pointer to embedded class device - ** @attr: device attribute (unused) - ** @buf: the buffer returned - ** A sysfs 'read-only' shost attribute. - **/ + * pm8001_ctl_gsm_log_show - gsm dump collection + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, struct device_attribute *attr, char *buf) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 33f8217577..17c0f26e68 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -384,7 +384,7 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, /** * pm8001_bar4_shift - function is called to shift BAR base address - * @pm8001_ha : our hba card infomation + * @pm8001_ha : our hba card information * @shiftValue : shifting value in memory bar. */ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) @@ -1151,7 +1151,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) } /** - * pm8001_chip_iounmap - which maped when initialized. + * pm8001_chip_iounmap - which mapped when initialized. * @pm8001_ha: our hba card information */ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) @@ -1187,10 +1187,10 @@ pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); } - /** - * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt - * @pm8001_ha: our hba card information - */ +/** + * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ static void pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) { @@ -1876,8 +1876,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, * @piomb: the message contents of this outbound message. * * When FW has completed a ssp request for example a IO request, after it has - * filled the SG data with the data, it will trigger this event represent - * that he has finished the job,please check the coresponding buffer. + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. */ @@ -3522,7 +3522,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * * when sas layer find a device it will notify LLDD, then the driver register * the domain device to FW, this event is the return device ID which the FW - * has assigned, from now,inter-communication with FW is no longer using the + * has assigned, from now, inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. */ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 313248c7ba..47db7e0bea 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -233,7 +233,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) /** * pm8001_interrupt_handler_intx - main INTx interrupt handler. * @irq: interrupt number - * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure. */ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) @@ -439,9 +439,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_ioremap - remap the pci high physical address to kernal virtual + * pm8001_ioremap - remap the pci high physical address to kernel virtual * address so that we can access them. - * @pm8001_ha:our hba structure. 
+ * @pm8001_ha: our hba structure. */ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) { @@ -652,7 +652,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, * pm8001_init_sas_add - initialize sas address * @pm8001_ha: our ha struct. * - * Currently we just set the fixed SAS address to our HBA,for manufacture, + * Currently we just set the fixed SAS address to our HBA, for manufacture, * it should read from the EEPROM */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) @@ -790,7 +790,7 @@ struct pm8001_mpi3_phy_pg_trx_config { }; /** - * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings + * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings * @pm8001_ha : our adapter * @phycfg : PHY config page to populate */ @@ -810,7 +810,7 @@ void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_get_external_phy_settings : Retrieves the external PHY settings + * pm8001_get_external_phy_settings - Retrieves the external PHY settings * @pm8001_ha : our adapter * @phycfg : PHY config page to populate */ @@ -830,7 +830,7 @@ void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_get_phy_mask : Retrieves the mask that denotes if a PHY is int/ext + * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext * @pm8001_ha : our adapter * @phymask : The PHY mask */ @@ -868,7 +868,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask) } /** - * pm8001_set_phy_settings_ven_117c_12G() : Configure ATTO 12Gb PHY settings + * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings * @pm8001_ha : our adapter */ static @@ -903,7 +903,7 @@ int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha) } /** - * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID. + * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID. * @pm8001_ha : our hba. */ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha) @@ -1053,8 +1053,8 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) * @ent: pci device id * * This function is the main initialization function, when register a new - * pci driver it is invoked, all struct an hardware initilization should be done - * here, also, register interrupt + * pci driver it is invoked, all struct and hardware initialization should be + * done here, also, register interrupt. */ static int pm8001_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1172,10 +1172,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev, return rc; } -/* +/** * pm8001_init_ccb_tag - allocate memory to CCB and tag. * @pm8001_ha: our hba card information. * @shost: scsi host which has been allocated outside. + * @pdev: pci device. */ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost, @@ -1270,7 +1271,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev) * pm8001_pci_suspend - power management suspend main entry point * @dev: Device struct * - * Returns 0 success, anything else error. + * Return: 0 on success, anything else on error. */ static int __maybe_unused pm8001_pci_suspend(struct device *dev) { @@ -1315,7 +1316,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev) * pm8001_pci_resume - power management resume main entry point * @dev: Device struct * - * Returns 0 success, anything else error. + * Return: 0 on success, anything else on error. 
*/ static int __maybe_unused pm8001_pci_resume(struct device *dev) { diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 6f33d821e5..48548a9532 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -98,14 +98,16 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) pm8001_tag_free(pm8001_ha, i); } - /** - * pm8001_mem_alloc - allocate memory for pm8001. - * @pdev: pci device. - * @virt_addr: the allocated virtual address - * @pphys_addr_hi: the physical address high byte address. - * @pphys_addr_lo: the physical address low byte address. - * @mem_size: memory size. - */ +/** + * pm8001_mem_alloc - allocate memory for pm8001. + * @pdev: pci device. + * @virt_addr: the allocated virtual address + * @pphys_addr: DMA address for this device + * @pphys_addr_hi: the physical address high byte address. + * @pphys_addr_lo: the physical address low byte address. + * @mem_size: memory size. + * @align: requested byte alignment + */ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align) @@ -339,7 +341,7 @@ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task + * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to ssp task */ @@ -554,10 +556,10 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, pm8001_tag_free(pm8001_ha, ccb_idx); } - /** - * pm8001_alloc_dev - find a empty pm8001_device - * @pm8001_ha: our hba card information - */ +/** + * pm8001_alloc_dev - find an empty pm8001_device + * @pm8001_ha: our hba card information + */ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; @@ -705,7 +707,7 @@ static void pm8001_tmf_timedout(struct timer_list *t) * @parameter: ssp task parameter. * * when errors or exception happened, we may want to do something, for example - * abort the issued task which result in this execption, it is done by calling + * abort the issued task which resulted in this exception; it is done by calling * this function, note it is also with the task execute interface. */ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, @@ -984,11 +986,12 @@ void pm8001_open_reject_retry( } /** - * pm8001_I_T_nexus_reset() - * Standard mandates link reset for ATA (type 0) and hard reset for - * SSP (type 1) , only for RECOVERY - * @dev: the device structure for the device to reset. - */ + * pm8001_I_T_nexus_reset() - reset the initiator/target connection + * @dev: the device structure for the device to reset.
+ * + * Standard mandates link reset for ATA (type 0) and hard reset for + * SSP (type 1), only for RECOVERY + */ int pm8001_I_T_nexus_reset(struct domain_device *dev) { int rc = TMF_RESP_FUNC_FAILED; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 45ecd96399..6ffe17b849 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -140,7 +140,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev, pm8001_ha->fatal_bar_loc = 0; } - /* Read until accum_len is retrived */ + /* Read until accum_len is retrieved */ accum_len = pm8001_mr32(fatal_table_address, MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); /* Determine length of data between previously stored transfer length @@ -1011,7 +1011,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) value); return -EBUSY; } - /* check the MPI-State for initialization upto 100ms*/ + /* check the MPI-State for initialization up to 100ms*/ max_wait_count = 5;/* 100 msec */ do { msleep(FW_READY_INTERVAL); @@ -1093,7 +1093,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); - /** + /* * lower 26 bits of SCRATCHPAD0 register describes offset within the * PCIe BAR where the MPI configuration table is present */ @@ -1101,7 +1101,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n", offset, value); - /** + /* * Upper 6 bits describe the offset within PCI config space where BAR * is located. */ @@ -1109,7 +1109,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pcibar = get_pci_bar_index(pcilogic); pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); - /** + /* * Make sure the offset falls inside the ioremapped PCI BAR */ if (offset > pm8001_ha->io_mem[pcibar].memsize) { @@ -1121,7 +1121,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl_addr = base_addr = pm8001_ha->io_mem[pcibar].memvirtaddr + offset; - /** + /* * Validate main configuration table address: first DWord should read * "PMCS" */ @@ -1385,7 +1385,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_encrypt_update - update flash with encryption informtion + * pm80xx_encrypt_update - update flash with encryption information * @pm8001_ha: our hba card information. */ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) @@ -1422,7 +1422,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_chip_init - the main init function that initialize whole PM8001 chip. + * pm80xx_chip_init - the main init function that initializes whole PM8001 chip. * @pm8001_ha: our hba card information */ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) @@ -1541,7 +1541,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_fatal_errors - returns non zero *ONLY* when fatal errors + * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors * @pm8001_ha: our hba card information * * Fatal errors are recoverable only after a host reboot. @@ -1576,8 +1576,8 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that the clear all - * the FW register status to the originated status. + * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all + * FW register status are reset to the originated status. 
* @pm8001_ha: our hba card information */ @@ -1895,13 +1895,13 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, } /** - * mpi_ssp_completion- process the event that FW response to the SSP request. + * mpi_ssp_completion - process the event that FW response to the SSP request. * @pm8001_ha: our hba card information * @piomb: the message contents of this outbound message. * * When FW has completed a ssp request for example a IO request, after it has - * filled the SG data with the data, it will trigger this event represent - * that he has finished the job,please check the coresponding buffer. + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. */ @@ -3217,7 +3217,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW. * @pm8001_ha: our hba card information * @Qnum: the outbound queue message number. * @SEA: source of event to ack @@ -3275,7 +3275,7 @@ static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha, } /** - * hw_event_sas_phy_up -FW tells me a SAS phy up event. + * hw_event_sas_phy_up - FW tells me a SAS phy up event. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3353,7 +3353,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * hw_event_sata_phy_up -FW tells me a SATA phy up event. + * hw_event_sata_phy_up - FW tells me a SATA phy up event. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3400,7 +3400,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * hw_event_phy_down -we should notify the libsas the phy is down. + * hw_event_phy_down - we should notify the libsas the phy is down. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3500,7 +3500,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_thermal_hw_event -The hw event has come. + * mpi_thermal_hw_event - a thermal hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3530,7 +3530,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_hw_event -The hw event has come. + * mpi_hw_event - The hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -4025,7 +4025,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEV_INFO: pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); break; - /* spcv specifc commands */ + /* spcv specific commands */ case OPC_OUB_PHY_START_RESP: pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PHY_START_RESP opcode:%x\n", opc); @@ -4186,7 +4186,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag, } /** - * pm80xx_chip_smp_req - send a SMP task to FW + * pm80xx_chip_smp_req - send an SMP task to FW * @pm8001_ha: our hba card information. * @ccb: the ccb information this request used. */ @@ -4346,7 +4346,7 @@ static int check_enc_sat_cmd(struct sas_task *task) } /** - * pm80xx_chip_ssp_io_req - send a SSP task to FW + * pm80xx_chip_ssp_io_req - send an SSP task to FW * @pm8001_ha: our hba card information. * @ccb: the ccb information this request used. 
*/ @@ -4750,13 +4750,13 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); /* SSC Disable and SAS Analog ST configuration */ - /** + /* payload.ase_sh_lm_slr_phyid = cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need - **/ + */ payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8f9727e525..7456a26aef 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -194,7 +194,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) * @bufflen: len of buffer * @sense: optional sense buffer * @sshdr: optional decoded sense header - * @timeout: request timeout in seconds + * @timeout: request timeout in HZ * @retries: number of times to retry request * @flags: flags for ->cmd_flags * @rq_flags: flags for ->rq_flags diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6d2d63629a..b8d55af763 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -98,11 +98,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); -#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) #define SD_MINORS 16 -#else -#define SD_MINORS 0 -#endif static void sd_config_discard(struct scsi_disk *, unsigned int); static void sd_config_write_same(struct scsi_disk *); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index c98d540ac0..194755c9dd 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1229,8 +1229,13 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, bool is_scsi_cmd) { - if (hba->vops && hba->vops->setup_xfer_req) - return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + if (hba->vops && hba->vops->setup_xfer_req) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } } static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 8e85889336..15db7a3868 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -586,6 +586,7 @@ static int qe_ep_init(struct qe_udc *udc, case USB_SPEED_FULL: if (max <= 1023) break; + fallthrough; default: goto en_done; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index c41e58ece8..f3f4b96511 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) fb_var_to_videomode(&mode2, &info->var); /* make sure we don't delete the videomode of current var */ ret = fb_mode_is_equal(&mode1, &mode2); - - if (!ret) - fbcon_mode_deleted(info, &mode1); - - if (!ret) - fb_delete_videomode(&mode1, &info->modelist); - + if (!ret) { + ret = fbcon_mode_deleted(info, &mode1); + if (!ret) + fb_delete_videomode(&mode1, &info->modelist); + } return ret ? 
-EINVAL : 0; } diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c index ffbf900648..438e2c7814 100644 --- a/drivers/video/fbdev/xilinxfb.c +++ b/drivers/video/fbdev/xilinxfb.c @@ -241,6 +241,8 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi) case FB_BLANK_POWERDOWN: /* turn off panel */ xilinx_fb_out32(drvdata, REG_CTRL, 0); + break; + default: break; } diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 38b127b9ed..9e7d9d0c76 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1498,9 +1498,18 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) return; - mutex_lock(&fs_info->reclaim_bgs_lock); + /* + * Long running balances can keep us blocked here for eternity, so + * simply skip reclaim if we're unable to get the mutex. + */ + if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { + btrfs_exclop_finish(fs_info); + return; + } + spin_lock(&fs_info->unused_bgs_lock); while (!list_empty(&fs_info->reclaim_bgs)) { + u64 zone_unusable; int ret = 0; bg = list_first_entry(&fs_info->reclaim_bgs, @@ -1534,13 +1543,22 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) goto next; } + /* + * Cache the zone_unusable value before turning the block group + * to read only. As soon as the block group is read only, its + * zone_unusable value gets moved to the block group's read-only + * bytes and isn't available for calculations anymore. + */ + zone_unusable = bg->zone_unusable; ret = inc_block_group_ro(bg, 0); up_write(&space_info->groups_sem); if (ret < 0) goto next; - btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used", - bg->start, div_u64(bg->used * 100, bg->length)); + btrfs_info(fs_info, + "reclaiming chunk %llu with %llu%% used %llu%% unusable", + bg->start, div_u64(bg->used * 100, bg->length), + div64_u64(zone_unusable * 100, bg->length)); trace_btrfs_reclaim_block_group(bg); ret = btrfs_relocate_chunk(fs_info, bg->start); if (ret) @@ -2197,6 +2215,13 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) return ret; } +/* + * This function, insert_block_group_item(), belongs to phase 2 of chunk + * allocation. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. + */ static int insert_block_group_item(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group) { @@ -2219,15 +2244,19 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans, return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); } +/* + * This function, btrfs_create_pending_block_groups(), belongs to phase 2 of + * chunk allocation. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. 
+ */ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *block_group; int ret = 0; - if (!trans->can_flush_pending_bgs) - return; - while (!list_empty(&trans->new_bgs)) { int index; @@ -2242,6 +2271,13 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) ret = insert_block_group_item(trans, block_group); if (ret) btrfs_abort_transaction(trans, ret); + if (!block_group->chunk_item_inserted) { + mutex_lock(&fs_info->chunk_mutex); + ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); + mutex_unlock(&fs_info->chunk_mutex); + if (ret) + btrfs_abort_transaction(trans, ret); + } ret = btrfs_finish_chunk_alloc(trans, block_group->start, block_group->length); if (ret) @@ -2265,8 +2301,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) btrfs_trans_release_chunk_metadata(trans); } -int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, - u64 type, u64 chunk_offset, u64 size) +struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, + u64 bytes_used, u64 type, + u64 chunk_offset, u64 size) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *cache; @@ -2276,7 +2313,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, cache = btrfs_create_block_group_cache(fs_info, chunk_offset); if (!cache) - return -ENOMEM; + return ERR_PTR(-ENOMEM); cache->length = size; set_free_space_tree_thresholds(cache); @@ -2290,7 +2327,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, ret = btrfs_load_block_group_zone_info(cache, true); if (ret) { btrfs_put_block_group(cache); - return ret; + return ERR_PTR(ret); } ret = exclude_super_stripes(cache); @@ -2298,7 +2335,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, /* We may have excluded something, so call this just in case */ btrfs_free_excluded_extents(cache); btrfs_put_block_group(cache); - return ret; + return ERR_PTR(ret); } add_new_free_space(cache, chunk_offset, chunk_offset + size); @@ -2325,7 +2362,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, if (ret) { btrfs_remove_free_space_cache(cache); btrfs_put_block_group(cache); - return ret; + return ERR_PTR(ret); } /* @@ -2344,7 +2381,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, btrfs_update_delayed_refs_rsv(trans); set_avail_alloc_bits(fs_info, type); - return 0; + return cache; } /* @@ -3222,11 +3259,203 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); } +static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) +{ + struct btrfs_block_group *bg; + int ret; + + /* + * Check if we have enough space in the system space info because we + * will need to update device items in the chunk btree and insert a new + * chunk item in the chunk btree as well. This will allocate a new + * system block group if needed. + */ + check_system_chunk(trans, flags); + + bg = btrfs_alloc_chunk(trans, flags); + if (IS_ERR(bg)) { + ret = PTR_ERR(bg); + goto out; + } + + /* + * If this is a system chunk allocation then stop right here and do not + * add the chunk item to the chunk btree. 
This is to prevent a deadlock + because this system chunk allocation can be triggered while COWing + some extent buffer of the chunk btree and while holding a lock on a + parent extent buffer, in which case attempting to insert the chunk + item (or update the device item) would result in a deadlock on that + parent extent buffer. In this case defer the chunk btree updates to + the second phase of chunk allocation and keep our reservation until + the second phase completes. + * + * This is a rare case and can only be triggered by the very few cases + * we have where we need to touch the chunk btree outside chunk allocation + * and chunk removal. These cases are basically adding a device, removing + * a device or resizing a device. + */ + if (flags & BTRFS_BLOCK_GROUP_SYSTEM) + return 0; + + ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); + /* + * Normally we are not expected to fail with -ENOSPC here, since we have + * previously reserved space in the system space_info and allocated one + * new system chunk if necessary. However there are two exceptions: + * + * 1) We may have enough free space in the system space_info but all the + * existing system block groups have a profile which cannot be used + * for extent allocation. + * + * This happens when mounting in degraded mode. For example we have a + * RAID1 filesystem with 2 devices, lose one device and mount the fs + * using the other device in degraded mode. If we then allocate a chunk, + * we may have enough free space in the existing system space_info, but + * none of the block groups can be used for extent allocation since they + * have a RAID1 profile, and because we are in degraded mode with a + * single device, we are forced to allocate a new system chunk with a + * SINGLE profile. Making check_system_chunk() iterate over all system + * block groups and check if they have a usable profile and enough space + * can be slow on very large filesystems, so we tolerate the -ENOSPC and + * try again after forcing allocation of a new system chunk. Like this + * we avoid paying the cost of that search in normal circumstances, when + * we were not mounted in degraded mode; + * + * 2) We had enough free space in the system space_info, and one suitable + * block group to allocate from when we called check_system_chunk() + * above. However right after we called it, the only system block group + * with enough free space got turned into RO mode by a running scrub, + * and in this case we have to allocate a new one and retry. We only + * need to do this allocation and retry once, since we have a transaction + * handle and scrub uses the commit root to search for block groups. 
+ */ + if (ret == -ENOSPC) { + const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); + struct btrfs_block_group *sys_bg; + + sys_bg = btrfs_alloc_chunk(trans, sys_flags); + if (IS_ERR(sys_bg)) { + ret = PTR_ERR(sys_bg); + btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } + + ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } + } else if (ret) { + btrfs_abort_transaction(trans, ret); + goto out; + } +out: + btrfs_trans_release_chunk_metadata(trans); + + return ret; +} + /* - * If force is CHUNK_ALLOC_FORCE: + * Chunk allocation is done in 2 phases: + * + * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for + * the chunk, the chunk mapping, create its block group and add the items + * that belong in the chunk btree to it - more specifically, we need to + * update device items in the chunk btree and add a new chunk item to it. + * + * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block + * group item to the extent btree and the device extent items to the devices + * btree. + * + * This is done to prevent deadlocks. For example when COWing a node from the + * extent btree we are holding a write lock on the node's parent and if we + * trigger chunk allocation and attempt to insert the new block group item + * in the extent btree right away, we could deadlock because the path for the + * insertion can include that parent node. At first glance it seems impossible + * to trigger chunk allocation after starting a transaction since tasks should + * reserve enough transaction units (metadata space), however while that is true + * most of the time, chunk allocation may still be triggered for several reasons: + * + * 1) When reserving metadata, we check if there is enough free space in the + * metadata space_info and therefore don't trigger allocation of a new chunk. + * However later when the task actually tries to COW an extent buffer from + * the extent btree or from the device btree for example, it is forced to + * allocate a new block group (chunk) because the only one that had enough + * free space was just turned to RO mode by a running scrub for example (or + * device replace, block group reclaim thread, etc), so we cannot use it + * for allocating an extent and end up being forced to allocate a new one; + * + * 2) Because we only check that the metadata space_info has enough free bytes, + * we end up not allocating a new metadata chunk in that case. However if + * the filesystem was mounted in degraded mode, none of the existing block + * groups might be suitable for extent allocation due to their incompatible + * profile (e.g. mounting a 2-device filesystem, where all block groups + * use a RAID1 profile, in degraded mode using a single device). In this case + * when the task attempts to COW some extent buffer of the extent btree for + * example, it will trigger allocation of a new metadata block group with a + * suitable profile (SINGLE profile in the example of the degraded mount of + * the RAID1 filesystem); + * + * 3) The task has reserved enough transaction units / metadata space, but when + * it attempts to COW an extent buffer from the extent or device btree for + * example, it does not find any free extent in any metadata block group, + * and is therefore forced to try to allocate a new metadata block group. 
+ This is because some other task allocated all available extents in the + meanwhile - this typically happens with tasks that don't reserve space + properly, either intentionally or as a bug. One example where this is + done intentionally is fsync, as it does not reserve any transaction units + and ends up allocating a variable number of metadata extents for log + tree extent buffers. + * + * We also need this two-phase setup when adding a device to a filesystem with + * a seed device - we must create new metadata and system chunks without adding + * any of the block group items to the chunk, extent and device btrees. If we + * did not do it this way, we would get ENOSPC when attempting to update those + * btrees, since all the chunks from the seed device are read-only. + * + * Phase 1 does the updates and insertions to the chunk btree because if it were + * done in phase 2 and we had a thundering herd of tasks allocating chunks in + * parallel, we would risk having too many system chunks allocated, as many + * tasks could reach phase 1 without the previous ones completing phase 2. In the + * extreme case this leads to exhaustion of the system chunk array in the + * superblock. This is easier to trigger if using a btree node/leaf size of 64K + * and with RAID filesystems (so we have more device items in the chunk btree). + * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of + * the system chunk array due to concurrent allocations") provides more details. + * + * For allocation of system chunks, we defer the updates and insertions into the + * chunk btree to phase 2. This is to prevent deadlocks on extent buffers because + * if the chunk allocation is triggered while COWing an extent buffer of the + * chunk btree, we are holding a lock on the parent of that extent buffer and + * doing the chunk btree updates and insertions can require locking that parent. + * This is for the very few and rare cases where we update the chunk btree that + * are not chunk allocation or chunk removal: adding a device, removing a device + * or resizing a device. + * + * The reservation of system space, done through check_system_chunk(), as well + * as all the updates and insertions into the chunk btree must be done while + * holding fs_info->chunk_mutex. This is important to guarantee that while COWing + * an extent buffer from the chunk btree we never trigger allocation of a new + * system chunk, which would result in a deadlock (trying to lock an extent + * buffer of the chunk btree twice: first before triggering the chunk + * allocation, and a second time during chunk allocation while attempting to + * update the chunk btree). The system chunk array is also updated while holding + * that mutex. The same logic applies to removing chunks - we must reserve system + * space, update the chunk btree and the system chunk array in the superblock + * while holding fs_info->chunk_mutex. + * + * This function, btrfs_chunk_alloc(), belongs to phase 1. + * + * If @force is CHUNK_ALLOC_FORCE: * - return 1 if it successfully allocates a chunk, * - return errors including -ENOSPC otherwise. - * If force is NOT CHUNK_ALLOC_FORCE: + * If @force is NOT CHUNK_ALLOC_FORCE: * - return 0 if it doesn't need to allocate a new chunk, * - return 1 if it successfully allocates a chunk, * - return errors including -ENOSPC otherwise. 
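To make the two-phase split described above concrete, here is a condensed sketch of the phase 1 flow, distilled from the do_chunk_alloc() hunk earlier in this patch. The function name is made up for illustration, and the -ENOSPC retry and transaction-abort paths shown above are omitted:

/*
 * Illustrative phase 1 sketch (not part of the patch): reserve system
 * space, allocate the chunk, and add the chunk item to the chunk btree
 * right away for non-system chunks. System chunks skip that last step
 * and leave the chunk btree update to phase 2, performed by
 * btrfs_create_pending_block_groups().
 */
static int sketch_chunk_alloc_phase1(struct btrfs_trans_handle *trans, u64 flags)
{
	struct btrfs_block_group *bg;

	/* May allocate a new system block group for the btree updates. */
	check_system_chunk(trans, flags);

	bg = btrfs_alloc_chunk(trans, flags);	/* now returns the block group */
	if (IS_ERR(bg))
		return PTR_ERR(bg);

	/* System chunks: defer chunk btree updates to phase 2. */
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return 0;

	return btrfs_chunk_alloc_add_chunk_item(trans, bg);
}
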
@@ -3243,6 +3472,13 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, /* Don't re-enter if we're already allocating a chunk */ if (trans->allocating_chunk) return -ENOSPC; + /* + * If we are removing a chunk, don't re-enter or we would deadlock. + * System space reservation and system chunk allocation is done by the + * chunk remove operation (btrfs_remove_chunk()). + */ + if (trans->removing_chunk) + return -ENOSPC; space_info = btrfs_find_space_info(fs_info, flags); ASSERT(space_info); @@ -3306,13 +3542,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, force_metadata_allocation(fs_info); } - /* - * Check if we have enough space in SYSTEM chunk because we may need - * to update devices. - */ - check_system_chunk(trans, flags); - - ret = btrfs_alloc_chunk(trans, flags); + ret = do_chunk_alloc(trans, flags); trans->allocating_chunk = false; spin_lock(&space_info->lock); @@ -3331,22 +3561,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); mutex_unlock(&fs_info->chunk_mutex); - /* - * When we allocate a new chunk we reserve space in the chunk block - * reserve to make sure we can COW nodes/leafs in the chunk tree or - * add new nodes/leafs to it if we end up needing to do it when - * inserting the chunk item and updating device items as part of the - * second phase of chunk allocation, performed by - * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a - * large number of new block groups to create in our transaction - * handle's new_bgs list to avoid exhausting the chunk block reserve - * in extreme cases - like having a single transaction create many new - * block groups when starting to write out the free space caches of all - * the block groups that were made dirty during the lifetime of the - * transaction. - */ - if (trans->chunk_bytes_reserved >= (u64)SZ_2M) - btrfs_create_pending_block_groups(trans); return ret; } @@ -3367,7 +3581,6 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) */ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) { - struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_space_info *info; u64 left; @@ -3382,7 +3595,6 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) lockdep_assert_held(&fs_info->chunk_mutex); info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); -again: spin_lock(&info->lock); left = info->total_bytes - btrfs_space_info_used(info, true); spin_unlock(&info->lock); @@ -3401,76 +3613,39 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) if (left < thresh) { u64 flags = btrfs_system_alloc_profile(fs_info); - u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved); - - /* - * If there's not available space for the chunk tree (system - * space) and there are other tasks that reserved space for - * creating a new system block group, wait for them to complete - * the creation of their system block group and release excess - * reserved space. 
We do this because: - * - * *) We can end up allocating more system chunks than necessary - * when there are multiple tasks that are concurrently - * allocating block groups, which can lead to exhaustion of - * the system array in the superblock; - * - * *) If we allocate extra and unnecessary system block groups, - * despite being empty for a long time, and possibly forever, - * they end not being added to the list of unused block groups - * because that typically happens only when deallocating the - * last extent from a block group - which never happens since - * we never allocate from them in the first place. The few - * exceptions are when mounting a filesystem or running scrub, - * which add unused block groups to the list of unused block - * groups, to be deleted by the cleaner kthread. - * And even when they are added to the list of unused block - * groups, it can take a long time until they get deleted, - * since the cleaner kthread might be sleeping or busy with - * other work (deleting subvolumes, running delayed iputs, - * defrag scheduling, etc); - * - * This is rare in practice, but can happen when too many tasks - * are allocating blocks groups in parallel (via fallocate()) - * and before the one that reserved space for a new system block - * group finishes the block group creation and releases the space - * reserved in excess (at btrfs_create_pending_block_groups()), - * other tasks end up here and see free system space temporarily - * not enough for updating the chunk tree. - * - * We unlock the chunk mutex before waiting for such tasks and - * lock it again after the wait, otherwise we would deadlock. - * It is safe to do so because allocating a system chunk is the - * first thing done while allocating a new block group. - */ - if (reserved > trans->chunk_bytes_reserved) { - const u64 min_needed = reserved - thresh; - - mutex_unlock(&fs_info->chunk_mutex); - wait_event(cur_trans->chunk_reserve_wait, - atomic64_read(&cur_trans->chunk_bytes_reserved) <= - min_needed); - mutex_lock(&fs_info->chunk_mutex); - goto again; - } + struct btrfs_block_group *bg; /* * Ignore failure to create system chunk. We might end up not * needing it, as we might not need to COW all nodes/leafs from * the paths we visit in the chunk tree (they were already COWed * or created in the current transaction for example). + * + * Also, if our caller is allocating a system chunk, do not + * attempt to insert the chunk item in the chunk btree, as we + * could deadlock on an extent buffer since our caller may be + * COWing an extent buffer from the chunk btree. */ - ret = btrfs_alloc_chunk(trans, flags); + bg = btrfs_alloc_chunk(trans, flags); + if (IS_ERR(bg)) { + ret = PTR_ERR(bg); + } else if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) { + /* + * If we fail to add the chunk item here, we end up + * trying again at phase 2 of chunk allocation, at + * btrfs_create_pending_block_groups(). So ignore + * any error here. 
+ */ + btrfs_chunk_alloc_add_chunk_item(trans, bg); + } } if (!ret) { ret = btrfs_block_rsv_add(fs_info->chunk_root, &fs_info->chunk_block_rsv, thresh, BTRFS_RESERVE_NO_FLUSH); - if (!ret) { - atomic64_add(thresh, &cur_trans->chunk_bytes_reserved); + if (!ret) trans->chunk_bytes_reserved += thresh; - } } } diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 7b927425dc..c72a71efcb 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -97,6 +97,7 @@ struct btrfs_block_group { unsigned int removed:1; unsigned int to_copy:1; unsigned int relocating_repair:1; + unsigned int chunk_item_inserted:1; int disk_cache_state; @@ -268,8 +269,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work); void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info); void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg); int btrfs_read_block_groups(struct btrfs_fs_info *info); -int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, - u64 type, u64 chunk_offset, u64 size); +struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, + u64 bytes_used, u64 type, + u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, bool do_chunk_alloc); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 4bc3ca2cbd..c5c08c87e1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -364,49 +364,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, return 0; } -static struct extent_buffer *alloc_tree_block_no_bg_flush( - struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 parent_start, - const struct btrfs_disk_key *disk_key, - int level, - u64 hint, - u64 empty_size, - enum btrfs_lock_nesting nest) -{ - struct btrfs_fs_info *fs_info = root->fs_info; - struct extent_buffer *ret; - - /* - * If we are COWing a node/leaf from the extent, chunk, device or free - * space trees, make sure that we do not finish block group creation of - * pending block groups. We do this to avoid a deadlock. - * COWing can result in allocation of a new chunk, and flushing pending - * block groups (btrfs_create_pending_block_groups()) can be triggered - * when finishing allocation of a new chunk. Creation of a pending block - * group modifies the extent, chunk, device and free space trees, - * therefore we could deadlock with ourselves since we are holding a - * lock on an extent buffer that btrfs_create_pending_block_groups() may - * try to COW later. - * For similar reasons, we also need to delay flushing pending block - * groups when splitting a leaf or node, from one of those trees, since - * we are holding a write lock on it and its parent or when inserting a - * new root node for one of those trees. - */ - if (root == fs_info->extent_root || - root == fs_info->chunk_root || - root == fs_info->dev_root || - root == fs_info->free_space_root) - trans->can_flush_pending_bgs = false; - - ret = btrfs_alloc_tree_block(trans, root, parent_start, - root->root_key.objectid, disk_key, level, - hint, empty_size, nest); - trans->can_flush_pending_bgs = true; - - return ret; -} - /* * does the dirty work in cow of a single block. The parent block (if * supplied) is updated to point to the new cow copy. 
The new buffer is marked @@ -455,8 +412,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) parent_start = parent->start; - cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, - level, search_start, empty_size, nest); + cow = btrfs_alloc_tree_block(trans, root, parent_start, + root->root_key.objectid, &disk_key, level, + search_start, empty_size, nest); if (IS_ERR(cow)) return PTR_ERR(cow); @@ -2458,9 +2416,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, else btrfs_node_key(lower, &lower_key, 0); - c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, - root->node->start, 0, - BTRFS_NESTING_NEW_ROOT); + c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, + &lower_key, level, root->node->start, 0, + BTRFS_NESTING_NEW_ROOT); if (IS_ERR(c)) return PTR_ERR(c); @@ -2589,8 +2547,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans, mid = (c_nritems + 1) / 2; btrfs_node_key(c, &disk_key, mid); - split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level, - c->start, 0, BTRFS_NESTING_SPLIT); + split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, + &disk_key, level, c->start, 0, + BTRFS_NESTING_SPLIT); if (IS_ERR(split)) return PTR_ERR(split); @@ -3381,10 +3340,10 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just * use BTRFS_NESTING_NEW_ROOT. */ - right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0, - l->start, 0, num_doubles ? - BTRFS_NESTING_NEW_ROOT : - BTRFS_NESTING_SPLIT); + right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, + &disk_key, 0, l->start, 0, + num_doubles ? BTRFS_NESTING_NEW_ROOT : + BTRFS_NESTING_SPLIT); if (IS_ERR(right)) return PTR_ERR(right); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e6eb209873..8f60314c36 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2271,13 +2271,127 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio, return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); } +/* + * Split an extent_map at [start, start + len] + * + * This function is intended to be used only for extract_ordered_extent(). 
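+ *
+ * Illustration (assuming both @pre and @post are non-zero): the single
+ * extent map covering [start, start + len) is replaced by three:
+ *
+ *   [start, start + pre)               -> split_pre
+ *   [start + pre, start + len - post)  -> split_mid
+ *   [start + len - post, start + len)  -> split_post
+ *
+ * When @pre or @post is zero, the corresponding piece is simply not
+ * created, as the allocations below show.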
+ */ +static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len, + u64 pre, u64 post) +{ + struct extent_map_tree *em_tree = &inode->extent_tree; + struct extent_map *em; + struct extent_map *split_pre = NULL; + struct extent_map *split_mid = NULL; + struct extent_map *split_post = NULL; + int ret = 0; + int modified; + unsigned long flags; + + /* Sanity check */ + if (pre == 0 && post == 0) + return 0; + + split_pre = alloc_extent_map(); + if (pre) + split_mid = alloc_extent_map(); + if (post) + split_post = alloc_extent_map(); + if (!split_pre || (pre && !split_mid) || (post && !split_post)) { + ret = -ENOMEM; + goto out; + } + + ASSERT(pre + post < len); + + lock_extent(&inode->io_tree, start, start + len - 1); + write_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, start, len); + if (!em) { + ret = -EIO; + goto out_unlock; + } + + ASSERT(em->len == len); + ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); + ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); + + flags = em->flags; + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + clear_bit(EXTENT_FLAG_LOGGING, &flags); + modified = !list_empty(&em->list); + + /* First, replace the em with a new extent_map starting from * em->start */ + split_pre->start = em->start; + split_pre->len = (pre ? pre : em->len - post); + split_pre->orig_start = split_pre->start; + split_pre->block_start = em->block_start; + split_pre->block_len = split_pre->len; + split_pre->orig_block_len = split_pre->block_len; + split_pre->ram_bytes = split_pre->len; + split_pre->flags = flags; + split_pre->compress_type = em->compress_type; + split_pre->generation = em->generation; + + replace_extent_mapping(em_tree, em, split_pre, modified); + + /* + * Now we only have an extent_map at: + * [em->start, em->start + pre] if pre != 0 + * [em->start, em->start + em->len - post] if pre == 0 + */ + + if (pre) { + /* Insert the middle extent_map */ + split_mid->start = em->start + pre; + split_mid->len = em->len - pre - post; + split_mid->orig_start = split_mid->start; + split_mid->block_start = em->block_start + pre; + split_mid->block_len = split_mid->len; + split_mid->orig_block_len = split_mid->block_len; + split_mid->ram_bytes = split_mid->len; + split_mid->flags = flags; + split_mid->compress_type = em->compress_type; + split_mid->generation = em->generation; + add_extent_mapping(em_tree, split_mid, modified); + } + + if (post) { + split_post->start = em->start + em->len - post; + split_post->len = post; + split_post->orig_start = split_post->start; + split_post->block_start = em->block_start + em->len - post; + split_post->block_len = split_post->len; + split_post->orig_block_len = split_post->block_len; + split_post->ram_bytes = split_post->len; + split_post->flags = flags; + split_post->compress_type = em->compress_type; + split_post->generation = em->generation; + add_extent_mapping(em_tree, split_post, modified); + } + + /* Once for us */ + free_extent_map(em); + /* Once for the tree */ + free_extent_map(em); + +out_unlock: + write_unlock(&em_tree->lock); + unlock_extent(&inode->io_tree, start, start + len - 1); +out: + free_extent_map(split_pre); + free_extent_map(split_mid); + free_extent_map(split_post); + + return ret; +} + static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, struct bio *bio, loff_t file_offset) { struct btrfs_ordered_extent *ordered; - struct extent_map *em = NULL, *em_new = NULL; - struct extent_map_tree *em_tree = &inode->extent_tree; u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT; + u64 
file_len; u64 len = bio->bi_iter.bi_size; u64 end = start + len; u64 ordered_end; @@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, goto out; } + file_len = ordered->num_bytes; pre = start - ordered->disk_bytenr; post = ordered_end - end; ret = btrfs_split_ordered_extent(ordered, pre, post); if (ret) goto out; - - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, ordered->file_offset, len); - if (!em) { - read_unlock(&em_tree->lock); - ret = -EIO; - goto out; - } - read_unlock(&em_tree->lock); - - ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); - /* - * We cannot reuse em_new here but have to create a new one, as - * unpin_extent_cache() expects the start of the extent map to be the - * logical offset of the file, which does not hold true anymore after - * splitting. - */ - em_new = create_io_em(inode, em->start + pre, len, - em->start + pre, em->block_start + pre, len, - len, len, BTRFS_COMPRESS_NONE, - BTRFS_ORDERED_REGULAR); - if (IS_ERR(em_new)) { - ret = PTR_ERR(em_new); - goto out; - } - free_extent_map(em_new); + ret = split_zoned_em(inode, file_offset, file_len, pre, post); out: - free_extent_map(em); btrfs_put_ordered_extent(ordered); return errno_to_blk_status(ret); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 50318231c1..14b9fdc8aa 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -254,23 +254,21 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans) } /* - * To be called after all the new block groups attached to the transaction - * handle have been created (btrfs_create_pending_block_groups()). + * To be called after doing the chunk btree updates right after allocating a new + * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a + * chunk after all chunk btree updates and after finishing the second phase of + * chunk allocation (btrfs_create_pending_block_groups()) in case some block + * group had its chunk item insertion delayed to the second phase. 
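+ *
+ * Sketch of the expected pairing with the reservation, using the chunk
+ * removal path from this patch as the example:
+ *
+ *   mutex_lock(&fs_info->chunk_mutex);
+ *   check_system_chunk(trans, type);            (reserves system space)
+ *   ...chunk btree updates and deletions...
+ *   mutex_unlock(&fs_info->chunk_mutex);
+ *   btrfs_trans_release_chunk_metadata(trans);  (releases it again)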
*/ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_transaction *cur_trans = trans->transaction; if (!trans->chunk_bytes_reserved) return; - WARN_ON_ONCE(!list_empty(&trans->new_bgs)); - btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv, trans->chunk_bytes_reserved, NULL); - atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved); - cond_wake_up(&cur_trans->chunk_reserve_wait); trans->chunk_bytes_reserved = 0; } @@ -386,8 +384,6 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info, spin_lock_init(&cur_trans->dropped_roots_lock); INIT_LIST_HEAD(&cur_trans->releasing_ebs); spin_lock_init(&cur_trans->releasing_ebs_lock); - atomic64_set(&cur_trans->chunk_bytes_reserved, 0); - init_waitqueue_head(&cur_trans->chunk_reserve_wait); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(fs_info, &cur_trans->dirty_pages, IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode); @@ -701,7 +697,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, h->fs_info = root->fs_info; h->type = type; - h->can_flush_pending_bgs = true; INIT_LIST_HEAD(&h->new_bgs); smp_mb(); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 07d76029f5..ba45065f94 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -96,13 +96,6 @@ struct btrfs_transaction { spinlock_t releasing_ebs_lock; struct list_head releasing_ebs; - - /* - * The number of bytes currently reserved, by all transaction handles - * attached to this transaction, for metadata extents of the chunk tree. - */ - atomic64_t chunk_bytes_reserved; - wait_queue_head_t chunk_reserve_wait; }; #define __TRANS_FREEZABLE (1U << 0) @@ -139,7 +132,7 @@ struct btrfs_trans_handle { short aborted; bool adding_csums; bool allocating_chunk; - bool can_flush_pending_bgs; + bool removing_chunk; bool reloc_reserved; bool in_fsync; struct btrfs_root *root; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cab451d195..dc6eb088d7 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3173,7 +3173,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, if (!log_root_tree->node) { ret = btrfs_alloc_log_tree_node(trans, log_root_tree); if (ret) { - mutex_unlock(&fs_info->tree_log_mutex); + mutex_unlock(&fs_info->tree_root->log_mutex); goto out; } } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 807502cd65..1e4d43ffe3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1745,19 +1745,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); } else { - btrfs_handle_fs_error(fs_info, ret, "Slot search failed"); goto out; } *dev_extent_len = btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); - if (ret) { - btrfs_handle_fs_error(fs_info, ret, - "Failed to remove dev extent item"); - } else { + if (ret == 0) set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); - } out: btrfs_free_path(path); return ret; @@ -2942,7 +2937,7 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) u32 cur; struct btrfs_key key; - mutex_lock(&fs_info->chunk_mutex); + lockdep_assert_held(&fs_info->chunk_mutex); array_size = btrfs_super_sys_array_size(super_copy); ptr = super_copy->sys_chunk_array; @@ -2972,7 +2967,6 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) cur += len; } } - 
mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
@@ -3012,6 +3006,29 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
return em;
}
+static int remove_chunk_item(struct btrfs_trans_handle *trans,
+			     struct map_lookup *map, u64 chunk_offset)
+{
+	int i;
+
+	/*
+	 * Removing chunk items and updating the device items in the chunk btree
+	 * requires holding the chunk_mutex.
+	 * See the comment at btrfs_chunk_alloc() for the details.
+	 */
+	lockdep_assert_held(&trans->fs_info->chunk_mutex);
+
+	for (i = 0; i < map->num_stripes; i++) {
+		int ret;
+
+		ret = btrfs_update_device(trans, map->stripes[i].dev);
+		if (ret)
+			return ret;
+	}
+
+	return btrfs_free_chunk(trans, chunk_offset);
+}
+
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -3032,14 +3049,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
return PTR_ERR(em);
}
map = em->map_lookup;
-	mutex_lock(&fs_info->chunk_mutex);
-	check_system_chunk(trans, map->type);
-	mutex_unlock(&fs_info->chunk_mutex);
/*
-	 * Take the device list mutex to prevent races with the final phase of
-	 * a device replace operation that replaces the device object associated
-	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+	 * First delete the device extent items from the devices btree.
+	 * We take the device_list_mutex to avoid racing with the finishing phase
+	 * of a device replace operation. See the comment below before acquiring
+	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
+	 * because that can result in a deadlock when deleting the device extent
+	 * items from the devices btree - COWing an extent buffer from the btree
+	 * may result in allocating a new metadata chunk, which would attempt to
+	 * acquire fs_info->chunk_mutex again.
*/
mutex_lock(&fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
@@ -3061,18 +3080,73 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
btrfs_clear_space_info_full(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
}
-
-		ret = btrfs_update_device(trans, device);
-		if (ret) {
-			mutex_unlock(&fs_devices->device_list_mutex);
-			btrfs_abort_transaction(trans, ret);
-			goto out;
-		}
}
mutex_unlock(&fs_devices->device_list_mutex);
-	ret = btrfs_free_chunk(trans, chunk_offset);
-	if (ret) {
+	/*
+	 * We acquire fs_info->chunk_mutex for 2 reasons:
+	 *
+	 * 1) Just like with the first phase of the chunk allocation, we must
+	 *    reserve system space, do all chunk btree updates and deletions, and
+	 *    update the system chunk array in the superblock while holding this
+	 *    mutex. This is for similar reasons as explained in the comment at
+	 *    the top of btrfs_chunk_alloc();
+	 *
+	 * 2) Prevent races with the final phase of a device replace operation
+	 *    that replaces the device object associated with the map's stripes,
+	 *    because the device object's id can change at any time during that
+	 *    final phase of the device replace operation
+	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+	 *    replaced device and then see it with an ID of
+	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
+	 *    the device item, which does not exist on the chunk btree.
+	 *    The finishing phase of device replace acquires both the
+	 *    device_list_mutex and the chunk_mutex, in that order, so we are
+	 *    safe by just acquiring the chunk_mutex.
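+	 *
+	 * Lock ordering sketch of the two sides described above:
+	 *
+	 *   device replace finishing:        chunk removal (here):
+	 *     lock device_list_mutex           lock chunk_mutex
+	 *     lock chunk_mutex                 chunk btree updates/deletions
+	 *
+	 * so holding just the chunk_mutex is enough to exclude the finishing
+	 * phase while we update and delete chunk btree items.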
+	 */
+	trans->removing_chunk = true;
+	mutex_lock(&fs_info->chunk_mutex);
+
+	check_system_chunk(trans, map->type);
+
+	ret = remove_chunk_item(trans, map, chunk_offset);
+	/*
+	 * Normally we should not get -ENOSPC since we reserved space before
+	 * through the call to check_system_chunk().
+	 *
+	 * Despite our system space_info having enough free space, we may not
+	 * be able to allocate extents from its block groups, because all have
+	 * an incompatible profile, which will force us to allocate a new system
+	 * block group with the right profile, or right after we called
+	 * check_system_chunk() above, a scrub turned the only system block group
+	 * with enough free space into RO mode.
+	 * This is explained in more detail at do_chunk_alloc().
+	 *
+	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
+	 */
+	if (ret == -ENOSPC) {
+		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
+		struct btrfs_block_group *sys_bg;
+
+		sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+		if (IS_ERR(sys_bg)) {
+			ret = PTR_ERR(sys_bg);
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
+
+		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
+
+		ret = remove_chunk_item(trans, map, chunk_offset);
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
+	} else if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3087,6 +3161,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
}
+	mutex_unlock(&fs_info->chunk_mutex);
+	trans->removing_chunk = false;
+
+	/*
+	 * We are done with chunk btree updates and deletions, so release the
+	 * system space we previously reserved (with check_system_chunk()).
+	 */
+	btrfs_trans_release_chunk_metadata(trans);
+
ret = btrfs_remove_block_group(trans, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -3094,6 +3177,10 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
out:
+	if (trans->removing_chunk) {
+		mutex_unlock(&fs_info->chunk_mutex);
+		trans->removing_chunk = false;
+	}
/* once for us */
free_extent_map(em);
return ret;
@@ -4860,13 +4947,12 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
u32 array_size;
u8 *ptr;
-	mutex_lock(&fs_info->chunk_mutex);
+	lockdep_assert_held(&fs_info->chunk_mutex);
+
array_size = btrfs_super_sys_array_size(super_copy);
if (array_size + item_size + sizeof(disk_key)
-	    > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
-		mutex_unlock(&fs_info->chunk_mutex);
+	    > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
return -EFBIG;
-	}
ptr = super_copy->sys_chunk_array + array_size;
btrfs_cpu_key_to_disk(&disk_key, key);
@@ -4875,7 +4961,6 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
memcpy(ptr, chunk, item_size);
item_size += sizeof(disk_key);
btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
-	mutex_unlock(&fs_info->chunk_mutex);
return 0;
}
@@ -5225,13 +5310,14 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
}
}
-static int create_chunk(struct btrfs_trans_handle *trans,
+static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
{
struct btrfs_fs_info *info = trans->fs_info;
struct map_lookup *map = NULL;
struct extent_map_tree *em_tree;
+	struct btrfs_block_group *block_group;
struct extent_map *em;
u64 start = ctl->start;
u64 type = ctl->type;
@@ -5241,7 +5327,7 @@ static int create_chunk(struct btrfs_trans_handle
*trans, map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS); if (!map) - return -ENOMEM; + return ERR_PTR(-ENOMEM); map->num_stripes = ctl->num_stripes; for (i = 0; i < ctl->ndevs; ++i) { @@ -5263,7 +5349,7 @@ static int create_chunk(struct btrfs_trans_handle *trans, em = alloc_extent_map(); if (!em) { kfree(map); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; @@ -5279,12 +5365,12 @@ static int create_chunk(struct btrfs_trans_handle *trans, if (ret) { write_unlock(&em_tree->lock); free_extent_map(em); - return ret; + return ERR_PTR(ret); } write_unlock(&em_tree->lock); - ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); - if (ret) + block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); + if (IS_ERR(block_group)) goto error_del_extent; for (i = 0; i < map->num_stripes; i++) { @@ -5304,7 +5390,7 @@ static int create_chunk(struct btrfs_trans_handle *trans, check_raid56_incompat_flag(info, type); check_raid1c34_incompat_flag(info, type); - return 0; + return block_group; error_del_extent: write_lock(&em_tree->lock); @@ -5316,34 +5402,36 @@ static int create_chunk(struct btrfs_trans_handle *trans, /* One for the tree reference */ free_extent_map(em); - return ret; + return block_group; } -int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) +struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + u64 type) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_fs_devices *fs_devices = info->fs_devices; struct btrfs_device_info *devices_info = NULL; struct alloc_chunk_ctl ctl; + struct btrfs_block_group *block_group; int ret; lockdep_assert_held(&info->chunk_mutex); if (!alloc_profile_is_valid(type, 0)) { ASSERT(0); - return -EINVAL; + return ERR_PTR(-EINVAL); } if (list_empty(&fs_devices->alloc_list)) { if (btrfs_test_opt(info, ENOSPC_DEBUG)) btrfs_debug(info, "%s: no writable device", __func__); - return -ENOSPC; + return ERR_PTR(-ENOSPC); } if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { btrfs_err(info, "invalid chunk type 0x%llx requested", type); ASSERT(0); - return -EINVAL; + return ERR_PTR(-EINVAL); } ctl.start = find_next_chunk(info); @@ -5353,46 +5441,43 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), GFP_NOFS); if (!devices_info) - return -ENOMEM; + return ERR_PTR(-ENOMEM); ret = gather_device_info(fs_devices, &ctl, devices_info); - if (ret < 0) + if (ret < 0) { + block_group = ERR_PTR(ret); goto out; + } ret = decide_stripe_size(fs_devices, &ctl, devices_info); - if (ret < 0) + if (ret < 0) { + block_group = ERR_PTR(ret); goto out; + } - ret = create_chunk(trans, &ctl, devices_info); + block_group = create_chunk(trans, &ctl, devices_info); out: kfree(devices_info); - return ret; + return block_group; } /* - * Chunk allocation falls into two parts. The first part does work - * that makes the new allocated chunk usable, but does not do any operation - * that modifies the chunk tree. The second part does the work that - * requires modifying the chunk tree. This division is important for the - * bootstrap process of adding storage to a seed btrfs. + * This function, btrfs_finish_chunk_alloc(), belongs to phase 2. + * + * See the comment at btrfs_chunk_alloc() for details about the chunk allocation + * phases. 
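+ *
+ * Rough sketch of the division of work after this change (see also
+ * btrfs_chunk_alloc_add_chunk_item() below):
+ *
+ *   phase 1 (typical, non-system case):
+ *       bg = btrfs_alloc_chunk(trans, type);
+ *       btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ *           (chunk item, device items, system chunk array)
+ *   phase 2 (btrfs_create_pending_block_groups()):
+ *       btrfs_finish_chunk_alloc(trans, chunk_offset, chunk_size);
+ *           (device extent items only)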
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
-	struct btrfs_root *extent_root = fs_info->extent_root;
-	struct btrfs_root *chunk_root = fs_info->chunk_root;
-	struct btrfs_key key;
struct btrfs_device *device;
-	struct btrfs_chunk *chunk;
-	struct btrfs_stripe *stripe;
struct extent_map *em;
struct map_lookup *map;
-	size_t item_size;
u64 dev_offset;
u64 stripe_size;
-	int i = 0;
+	int i;
int ret = 0;
em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
@@ -5400,53 +5485,117 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
return PTR_ERR(em);
map = em->map_lookup;
-	item_size = btrfs_chunk_item_size(map->num_stripes);
stripe_size = em->orig_block_len;
-	chunk = kzalloc(item_size, GFP_NOFS);
-	if (!chunk) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
/*
* Take the device list mutex to prevent races with the final phase of
* a device replace operation that replaces the device object associated
* with the map's stripes, because the device object's id can change
* at any time during that final phase of the device replace operation
-	 * (dev-replace.c:btrfs_dev_replace_finishing()).
+	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+	 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+	 * resulting in persisting a device extent item with such ID.
*/
mutex_lock(&fs_info->fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;
-		ret = btrfs_update_device(trans, device);
-		if (ret)
-			break;
ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
dev_offset, stripe_size);
if (ret)
break;
}
-	if (ret) {
-		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+	free_extent_map(em);
+	return ret;
+}
+
+/*
+ * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
+ * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
+ * system chunks.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+				     struct btrfs_block_group *bg)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
+	struct btrfs_root *chunk_root = fs_info->chunk_root;
+	struct btrfs_key key;
+	struct btrfs_chunk *chunk;
+	struct btrfs_stripe *stripe;
+	struct extent_map *em;
+	struct map_lookup *map;
+	size_t item_size;
+	int i;
+	int ret;
+
+	/*
+	 * We take the chunk_mutex for 2 reasons:
+	 *
+	 * 1) Updates and insertions in the chunk btree must be done while holding
+	 *    the chunk_mutex, as well as updating the system chunk array in the
+	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
+	 *    details;
+	 *
+	 * 2) To prevent races with the final phase of a device replace operation
+	 *    that replaces the device object associated with the map's stripes,
+	 *    because the device object's id can change at any time during that
+	 *    final phase of the device replace operation
+	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+	 *    which would cause a failure when updating the device item, which does
+	 *    not exist, or persisting a stripe of the chunk item with such ID.
+ * Here we can't use the device_list_mutex because our caller already + * has locked the chunk_mutex, and the final phase of device replace + * acquires both mutexes - first the device_list_mutex and then the + * chunk_mutex. Using any of those two mutexes protects us from a + * concurrent device replace. + */ + lockdep_assert_held(&fs_info->chunk_mutex); + + em = btrfs_get_chunk_map(fs_info, bg->start, bg->length); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + btrfs_abort_transaction(trans, ret); + return ret; + } + + map = em->map_lookup; + item_size = btrfs_chunk_item_size(map->num_stripes); + + chunk = kzalloc(item_size, GFP_NOFS); + if (!chunk) { + ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } + for (i = 0; i < map->num_stripes; i++) { + struct btrfs_device *device = map->stripes[i].dev; + + ret = btrfs_update_device(trans, device); + if (ret) + goto out; + } + stripe = &chunk->stripe; for (i = 0; i < map->num_stripes; i++) { - device = map->stripes[i].dev; - dev_offset = map->stripes[i].physical; + struct btrfs_device *device = map->stripes[i].dev; + const u64 dev_offset = map->stripes[i].physical; btrfs_set_stack_stripe_devid(stripe, device->devid); btrfs_set_stack_stripe_offset(stripe, dev_offset); memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); stripe++; } - mutex_unlock(&fs_info->fs_devices->device_list_mutex); - btrfs_set_stack_chunk_length(chunk, chunk_size); + btrfs_set_stack_chunk_length(chunk, bg->length); btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); btrfs_set_stack_chunk_type(chunk, map->type); @@ -5458,15 +5607,18 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.type = BTRFS_CHUNK_ITEM_KEY; - key.offset = chunk_offset; + key.offset = bg->start; ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); - if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { - /* - * TODO: Cleanup of inserted chunk root in case of - * failure. - */ + if (ret) + goto out; + + bg->chunk_item_inserted = 1; + + if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size); + if (ret) + goto out; } out: @@ -5479,16 +5631,41 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; u64 alloc_profile; - int ret; + struct btrfs_block_group *meta_bg; + struct btrfs_block_group *sys_bg; + + /* + * When adding a new device for sprouting, the seed device is read-only + * so we must first allocate a metadata and a system chunk. But before + * adding the block group items to the extent, device and chunk btrees, + * we must first: + * + * 1) Create both chunks without doing any changes to the btrees, as + * otherwise we would get -ENOSPC since the block groups from the + * seed device are read-only; + * + * 2) Add the device item for the new sprout device - finishing the setup + * of a new block group requires updating the device item in the chunk + * btree, so it must exist when we attempt to do it. The previous step + * ensures this does not fail with -ENOSPC. + * + * After that we can add the block group items to their btrees: + * update existing device item in the chunk btree, add a new block group + * item to the extent btree, add a new chunk item to the chunk btree and + * finally add the new device extent items to the devices btree. 
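+	 *
+	 * So the body below intentionally reduces to just (sketch):
+	 *
+	 *   meta_bg = btrfs_alloc_chunk(trans, metadata profile);
+	 *   sys_bg  = btrfs_alloc_chunk(trans, system profile);
+	 *
+	 * with steps 2 and onward performed later, as described above.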
+ */ alloc_profile = btrfs_metadata_alloc_profile(fs_info); - ret = btrfs_alloc_chunk(trans, alloc_profile); - if (ret) - return ret; + meta_bg = btrfs_alloc_chunk(trans, alloc_profile); + if (IS_ERR(meta_bg)) + return PTR_ERR(meta_bg); alloc_profile = btrfs_system_alloc_profile(fs_info); - ret = btrfs_alloc_chunk(trans, alloc_profile); - return ret; + sys_bg = btrfs_alloc_chunk(trans, alloc_profile); + if (IS_ERR(sys_bg)) + return PTR_ERR(sys_bg); + + return 0; } static inline int btrfs_chunk_max_errors(struct map_lookup *map) @@ -7415,10 +7592,18 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) total_dev++; } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; + + /* + * We are only called at mount time, so no need to take + * fs_info->chunk_mutex. Plus, to avoid lockdep warnings, + * we always lock first fs_info->chunk_mutex before + * acquiring any locks on the chunk tree. This is a + * requirement for chunk allocation, see the comment on + * top of btrfs_chunk_alloc() for details. + */ + ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); - mutex_lock(&fs_info->chunk_mutex); ret = read_one_chunk(&found_key, leaf, chunk); - mutex_unlock(&fs_info->chunk_mutex); if (ret) goto error; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index c7fc7caf57..55a8ba2447 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -450,7 +450,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map, struct btrfs_io_geometry *io_geom); int btrfs_read_sys_array(struct btrfs_fs_info *fs_info); int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); -int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type); +struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + u64 type); void btrfs_mapping_tree_free(struct extent_map_tree *tree); blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num); @@ -509,6 +510,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, u64 logical); int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, u64 chunk_offset, u64 chunk_size); +int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, + struct btrfs_block_group *bg); int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset); struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, u64 logical, u64 length); diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 57f91311fd..007427ba75 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -176,7 +176,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata, } } - rc = dns_resolve_server_name_to_ip(name, &srvIP); + rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL); if (rc < 0) { cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n", __func__, name, rc); @@ -211,6 +211,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata, else noff = tkn_e - (sb_mountdata + off) + 1; + if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) { + off += noff; + continue; + } if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) { off += noff; continue; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 3c2e117bb9..c0bfc2f010 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -75,6 +75,9 @@ #define SMB_ECHO_INTERVAL_MAX 600 #define SMB_ECHO_INTERVAL_DEFAULT 60 +/* dns resolution interval in seconds */ +#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600 + /* maximum 
number of PDUs in one compound */ #define MAX_COMPOUND 5 @@ -646,6 +649,7 @@ struct TCP_Server_Info { /* point to the SMBD connection if RDMA is used instead of socket */ struct smbd_connection *smbd_conn; struct delayed_work echo; /* echo ping workqueue job */ + struct delayed_work resolve; /* dns resolution workqueue job */ char *smallbuf; /* pointer to current "small" buffer */ char *bigbuf; /* pointer to current "big" buffer */ /* Total size of this PDU. Only valid from cifs_demultiplex_thread */ @@ -689,6 +693,9 @@ struct TCP_Server_Info { bool use_swn_dstaddr; struct sockaddr_storage swn_dstaddr; #endif +#ifdef CONFIG_CIFS_DFS_UPCALL + bool is_dfs_conn; /* if a dfs connection */ +#endif }; struct cifs_credits { diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 01dc45178f..1b04d6ec14 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -78,6 +78,8 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) int rc; int len; char *unc, *ipaddr = NULL; + time64_t expiry, now; + unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT; if (!server->hostname) return -EINVAL; @@ -91,13 +93,13 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) } scnprintf(unc, len, "\\\\%s", server->hostname); - rc = dns_resolve_server_name_to_ip(unc, &ipaddr); + rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry); kfree(unc); if (rc < 0) { cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n", __func__, server->hostname, rc); - return rc; + goto requeue_resolve; } spin_lock(&cifs_tcp_ses_lock); @@ -106,7 +108,45 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) spin_unlock(&cifs_tcp_ses_lock); kfree(ipaddr); - return !rc ? -1 : 0; + /* rc == 1 means success here */ + if (rc) { + now = ktime_get_real_seconds(); + if (expiry && expiry > now) + /* + * To make sure we don't use the cached entry, retry 1s + * after expiry. + */ + ttl = (expiry - now + 1); + } + rc = !rc ? -1 : 0; + +requeue_resolve: + cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n", + __func__, ttl); + mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ)); + + return rc; +} + + +static void cifs_resolve_server(struct work_struct *work) +{ + int rc; + struct TCP_Server_Info *server = container_of(work, + struct TCP_Server_Info, resolve.work); + + mutex_lock(&server->srv_mutex); + + /* + * Resolve the hostname again to make sure that IP address is up-to-date. + */ + rc = reconn_set_ipaddr_from_hostname(server); + if (rc) { + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", + __func__, rc); + } + + mutex_unlock(&server->srv_mutex); } #ifdef CONFIG_CIFS_DFS_UPCALL @@ -680,6 +720,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) spin_unlock(&cifs_tcp_ses_lock); cancel_delayed_work_sync(&server->echo); + cancel_delayed_work_sync(&server->resolve); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; @@ -1227,6 +1268,16 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { +#ifdef CONFIG_CIFS_DFS_UPCALL + /* + * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for + * DFS connections to do failover properly, so avoid sharing them with regular + * shares or even links that may connect to same server but having completely + * different failover targets. 
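+		 *
+		 * Sketch of how such a session gets marked (see
+		 * mount_get_dfs_conns() added below):
+		 *
+		 *   ctx->nosharesock = true;        new socket, no reuse
+		 *   server->is_dfs_conn = true;     and skipped here from now on
+		 *
+		 * so every DFS mount ends up with its own TCP session.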
+	 */
+	if (server->is_dfs_conn)
+		continue;
+#endif
/*
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
@@ -1254,12 +1305,16 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
return;
}
+	/* srv_count can never go negative */
+	WARN_ON(server->srv_count < 0);
+
put_net(cifs_net_ns(server));
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
+	cancel_delayed_work_sync(&server->resolve);
if (from_reconnect)
/*
@@ -1342,6 +1397,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+	INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
mutex_init(&tcp_ses->reconnect_mutex);
memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
@@ -1427,6 +1483,12 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
+	/* queue dns resolution delayed work */
+	cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
+		 __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
+
+	queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
+
return tcp_ses;
out_err_crypto_release:
@@ -1605,6 +1667,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
}
spin_unlock(&cifs_tcp_ses_lock);
+	/* ses_count can never go negative */
+	WARN_ON(ses->ses_count < 0);
+
spin_lock(&GlobalMid_Lock);
if (ses->status == CifsGood)
ses->status = CifsExiting;
@@ -1972,6 +2037,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
return;
}
+	/* tc_count can never go negative */
+	WARN_ON(tcon->tc_count < 0);
+
if (tcon->use_witness) {
int rc;
@@ -2910,6 +2978,23 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
+static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
+			       unsigned int *xid, struct TCP_Server_Info **nserver,
+			       struct cifs_ses **nses, struct cifs_tcon **ntcon)
+{
+	int rc;
+
+	ctx->nosharesock = true;
+	rc = mount_get_conns(ctx, cifs_sb, xid, nserver, nses, ntcon);
+	if (*nserver) {
+		cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
+		spin_lock(&cifs_tcp_ses_lock);
+		(*nserver)->is_dfs_conn = true;
+		spin_unlock(&cifs_tcp_ses_lock);
+	}
+	return rc;
+}
+
/*
* cifs_build_path_to_root returns full path to root when we do not have an
* existing connection (tcon)
@@ -3105,7 +3190,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
tmp_ctx.prepath);
mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
-	rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
+	rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
if (!rc || (*server && *ses)) {
/*
* We were able to connect to new target server. Update current context with
@@ -3404,7 +3489,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
}
-	ctx->nosharesock = true;
+	mount_put_conns(cifs_sb, xid, server, ses, tcon);
+	/*
+	 * Ignore the error here, because we may fail over to other targets
+	 * from a cached referral.
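+	 * For example, even if this connection attempt to the DFS root
+	 * fails, the do-while loop below can still try the remaining
+	 * targets from the referral ("Try to connect to other targets in
+	 * the referral" further down) until rc != -EREMOTE.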
+ */ + (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon); /* Get path of DFS root */ ref_path = build_unc_path_to_root(ctx, cifs_sb, false); @@ -3433,7 +3523,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) /* Connect to new DFS target only if we were redirected */ if (oldmnt != cifs_sb->ctx->mount_options) { mount_put_conns(cifs_sb, xid, server, ses, tcon); - rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon); + rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon); } if (rc && !server && !ses) { /* Failed to connect. Try to connect to other targets in the referral. */ @@ -3459,7 +3549,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) rc = -ELOOP; } while (rc == -EREMOTE); - if (rc || !tcon) + if (rc || !tcon || !ses) goto error; kfree(ref_path); @@ -4095,7 +4185,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru if (!tree) return -ENOMEM; - if (!tcon->dfs_path) { + /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */ + if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) { if (tcon->ipc) { scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname); rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); @@ -4105,9 +4196,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru goto out; } - rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl); - if (rc) - goto out; isroot = ref.server_type == DFS_TYPE_ROOT; free_dfs_info_param(&ref); diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c index d15b82d569..8c616aaeb7 100644 --- a/fs/cifs/dns_resolve.c +++ b/fs/cifs/dns_resolve.c @@ -24,6 +24,7 @@ * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address. * @unc: UNC path specifying the server (with '/' as delimiter) * @ip_addr: Where to return the IP address. + * @expiry: Where to return the expiry time for the dns record. * * The IP address will be returned in string form, and the caller is * responsible for freeing it. @@ -31,7 +32,7 @@ * Returns length of result on success, -ve on error. */ int -dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) +dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry) { struct sockaddr_storage ss; const char *hostname, *sep; @@ -66,13 +67,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) /* Perform the upcall */ rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len, - NULL, ip_addr, NULL, false); + NULL, ip_addr, expiry, false); if (rc < 0) cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n", __func__, len, len, hostname); else - cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n", - __func__, len, len, hostname, *ip_addr); + cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n", + __func__, len, len, hostname, *ip_addr, + expiry ? 
(*expiry) : 0); return rc; name_is_IP_address: diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h index 5be060b82b..9fa2807ef7 100644 --- a/fs/cifs/dns_resolve.h +++ b/fs/cifs/dns_resolve.h @@ -12,7 +12,7 @@ #define _DNS_RESOLVE_H #ifdef __KERNEL__ -extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); +extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry); #endif /* KERNEL */ #endif /* _DNS_RESOLVE_H */ diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 184138b4eb..844abeb2b4 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -1187,7 +1187,7 @@ int match_target_ip(struct TCP_Server_Info *server, cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2); - rc = dns_resolve_server_name_to_ip(target, &tip); + rc = dns_resolve_server_name_to_ip(target, &tip, NULL); if (rc < 0) goto out; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index e4c8f603dd..ba3c58e1f7 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -557,8 +557,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, p = buf; while (bytes_left >= sizeof(*p)) { info->speed = le64_to_cpu(p->LinkSpeed); - info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE); - info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE); + info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; + info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0; cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count); cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed); @@ -2910,6 +2910,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, /* ipc tcons are not refcounted */ spin_lock(&cifs_tcp_ses_lock); tcon->tc_count--; + /* tc_count can never go negative */ + WARN_ON(tcon->tc_count < 0); spin_unlock(&cifs_tcp_ses_lock); } kfree(utf16_path); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 4b27cb9105..e9cac7970b 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -394,6 +394,7 @@ struct smb2_compression_capabilities_context { __u16 Padding; __u32 Flags; __le16 CompressionAlgorithms[3]; + __u16 Pad; /* Some servers require pad to DataLen multiple of 8 */ /* Check if pad needed */ } __packed; diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 2f63bf3a73..5a0be9985b 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -91,7 +91,10 @@ static ssize_t configfs_read_iter(struct kiocb *iocb, struct iov_iter *to) } pr_debug("%s: count = %zd, pos = %lld, buf = %s\n", __func__, iov_iter_count(to), iocb->ki_pos, buffer->page); - retval = copy_to_iter(buffer->page, buffer->count, to); + if (iocb->ki_pos >= buffer->count) + goto out; + retval = copy_to_iter(buffer->page + iocb->ki_pos, + buffer->count - iocb->ki_pos, to); iocb->ki_pos += retval; if (retval == 0) retval = -EFAULT; @@ -162,7 +165,10 @@ static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to) buffer->needs_read_fill = 0; } - retval = copy_to_iter(buffer->bin_buffer, buffer->bin_buffer_size, to); + if (iocb->ki_pos >= buffer->bin_buffer_size) + goto out; + retval = copy_to_iter(buffer->bin_buffer + iocb->ki_pos, + buffer->bin_buffer_size - iocb->ki_pos, to); iocb->ki_pos += retval; if (retval == 0) retval = -EFAULT; @@ -171,21 +177,28 @@ static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to) return retval; } -static int fill_write_buffer(struct configfs_buffer *buffer, +/* Fill [buffer, buffer + pos) with data coming from @from. 
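+ *
+ * Worked example (illustrative): two sequential writes of "foo" then
+ * "bar" arrive with pos == 0 and pos == 3; after the second call
+ * buffer->page holds "foobar", with the terminating NUL written at
+ * to[copied] below.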
*/ +static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos, struct iov_iter *from) { + loff_t to_copy; int copied; + u8 *to; if (!buffer->page) buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0); if (!buffer->page) return -ENOMEM; - copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from); + to_copy = SIMPLE_ATTR_SIZE - 1 - pos; + if (to_copy <= 0) + return 0; + to = buffer->page + pos; + copied = copy_from_iter(to, to_copy, from); buffer->needs_read_fill = 1; /* if buf is assumed to contain a string, terminate it by \0, * so e.g. sscanf() can scan the string easily */ - buffer->page[copied] = 0; + to[copied] = 0; return copied ? : -EFAULT; } @@ -217,7 +230,7 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from) ssize_t len; mutex_lock(&buffer->mutex); - len = fill_write_buffer(buffer, from); + len = fill_write_buffer(buffer, iocb->ki_pos, from); if (len > 0) len = flush_write_buffer(file, buffer, len); if (len > 0) @@ -272,7 +285,9 @@ static ssize_t configfs_bin_write_iter(struct kiocb *iocb, buffer->bin_buffer_size = end_offset; } - len = copy_from_iter(buffer->bin_buffer, buffer->bin_buffer_size, from); + len = copy_from_iter(buffer->bin_buffer + iocb->ki_pos, + buffer->bin_buffer_size - iocb->ki_pos, from); + iocb->ki_pos += len; out: mutex_unlock(&buffer->mutex); return len ? : -EFAULT; diff --git a/fs/fs_context.c b/fs/fs_context.c index 2834d1afa6..de1985eae5 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -79,6 +79,35 @@ static int vfs_parse_sb_flag(struct fs_context *fc, const char *key) return -ENOPARAM; } +/** + * vfs_parse_fs_param_source - Handle setting "source" via parameter + * @fc: The filesystem context to modify + * @param: The parameter + * + * This is a simple helper for filesystems to verify that the "source" they + * accept is sane. + * + * Returns 0 on success, -ENOPARAM if this is not "source" parameter, and + * -EINVAL otherwise. In the event of failure, supplementary error information + * is logged. + */ +int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param) +{ + if (strcmp(param->key, "source") != 0) + return -ENOPARAM; + + if (param->type != fs_value_is_string) + return invalf(fc, "Non-string source"); + + if (fc->source) + return invalf(fc, "Multiple sources"); + + fc->source = param->string; + param->string = NULL; + return 0; +} +EXPORT_SYMBOL(vfs_parse_fs_param_source); + /** * vfs_parse_fs_param - Add a single parameter to a superblock config * @fc: The filesystem context to modify @@ -122,15 +151,9 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param) /* If the filesystem doesn't take any arguments, give it the * default handling of source. 
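 *
 * A filesystem's own ->parse_param() can use the new helper the same
 * way (sketch, mirroring the legacy_parse_param() change below):
 *
 *	ret = vfs_parse_fs_param_source(fc, param);
 *	if (ret != -ENOPARAM)
 *		return ret;
 *	(handle filesystem-specific parameters here)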
*/ - if (strcmp(param->key, "source") == 0) { - if (param->type != fs_value_is_string) - return invalf(fc, "VFS: Non-string source"); - if (fc->source) - return invalf(fc, "VFS: Multiple sources"); - fc->source = param->string; - param->string = NULL; - return 0; - } + ret = vfs_parse_fs_param_source(fc, param); + if (ret != -ENOPARAM) + return ret; return invalf(fc, "%s: Unknown parameter '%s'", fc->fs_type->name, param->key); @@ -504,16 +527,11 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) struct legacy_fs_context *ctx = fc->fs_private; unsigned int size = ctx->data_size; size_t len = 0; + int ret; - if (strcmp(param->key, "source") == 0) { - if (param->type != fs_value_is_string) - return invalf(fc, "VFS: Legacy: Non-string source"); - if (fc->source) - return invalf(fc, "VFS: Legacy: Multiple sources"); - fc->source = param->string; - param->string = NULL; - return 0; - } + ret = vfs_parse_fs_param_source(fc, param); + if (ret != -ENOPARAM) + return ret; if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS) return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options"); diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c index 4af318fbda..ef9498a6e8 100644 --- a/fs/hfs/bfind.c +++ b/fs/hfs/bfind.c @@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) fd->key = ptr + tree->max_key_len + 2; hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); - mutex_lock(&tree->tree_lock); + switch (tree->cnid) { + case HFS_CAT_CNID: + mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); + break; + case HFS_EXT_CNID: + mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); + break; + case HFS_ATTR_CNID: + mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); + break; + default: + return -EINVAL; + } return 0; } diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index b63a4df732..c0a73a6ffb 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -15,16 +15,31 @@ #include "btree.h" -void hfs_bnode_read(struct hfs_bnode *node, void *buf, - int off, int len) +void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; + int pagenum; + int bytes_read; + int bytes_to_read; + void *vaddr; off += node->page_offset; - page = node->page[0]; + pagenum = off >> PAGE_SHIFT; + off &= ~PAGE_MASK; /* compute page offset for the first page */ - memcpy(buf, kmap(page) + off, len); - kunmap(page); + for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) { + if (pagenum >= node->tree->pages_per_bnode) + break; + page = node->page[pagenum]; + bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off); + + vaddr = kmap_atomic(page); + memcpy(buf + bytes_read, vaddr + off, bytes_to_read); + kunmap_atomic(vaddr); + + pagenum++; + off = 0; /* page offset only applies to the first page */ + } } u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h index 4ba45caf59..0e6baee932 100644 --- a/fs/hfs/btree.h +++ b/fs/hfs/btree.h @@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *); #define NODE_HASH_SIZE 256 +/* B-tree mutex nested subclasses */ +enum hfs_btree_mutex_classes { + CATALOG_BTREE_MUTEX, + EXTENTS_BTREE_MUTEX, + ATTR_BTREE_MUTEX, +}; + /* A HFS BTree held in memory */ struct hfs_btree { struct super_block *sb; diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 44d07c9e3a..12d9bae393 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -420,14 +420,12 @@ static int hfs_fill_super(struct 
super_block *sb, void *data, int silent) if (!res) { if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { res = -EIO; - goto bail; + goto bail_hfs_find; } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); } - if (res) { - hfs_find_exit(&fd); - goto bail_no_root; - } + if (res) + goto bail_hfs_find; res = -EINVAL; root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); hfs_find_exit(&fd); @@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) /* everything's okay */ return 0; +bail_hfs_find: + hfs_find_exit(&fd); bail_no_root: pr_err("get root inode failed\n"); bail: diff --git a/fs/io_uring.c b/fs/io_uring.c index d94fb5835a..0cac361bf6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2016,7 +2016,7 @@ static void io_req_task_submit(struct io_kiocb *req) /* ctx stays valid until unlock, even if we drop all ours ctx->refs */ mutex_lock(&ctx->uring_lock); - if (!(current->flags & PF_EXITING) && !current->in_execve) + if (!(req->task->flags & PF_EXITING) && !req->task->in_execve) __io_queue_sqe(req); else io_req_complete_failed(req, -EFAULT); @@ -6019,11 +6019,13 @@ static bool io_drain_req(struct io_kiocb *req) ret = io_req_prep_async(req); if (ret) - return ret; + goto fail; io_prep_async_link(req); de = kmalloc(sizeof(*de), GFP_KERNEL); if (!de) { - io_req_complete_failed(req, -ENOMEM); + ret = -ENOMEM; +fail: + io_req_complete_failed(req, ret); return true; } diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 41da4f14c0..87ccb3438b 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -215,6 +215,7 @@ iomap_read_inline_data(struct inode *inode, struct page *page, if (PageUptodate(page)) return; + BUG_ON(page_has_private(page)); BUG_ON(page->index); BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); @@ -239,7 +240,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, { struct iomap_readpage_ctx *ctx = data; struct page *page = ctx->cur_page; - struct iomap_page *iop = iomap_page_create(inode, page); + struct iomap_page *iop; bool same_page = false, is_contig = false; loff_t orig_pos = pos; unsigned poff, plen; @@ -252,6 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, } /* zero post-eof blocks as the page may be mapped */ + iop = iomap_page_create(inode, page); iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); if (plen == 0) goto done; @@ -967,7 +969,6 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, block_commit_write(page, 0, length); } else { WARN_ON_ONCE(!PageUptodate(page)); - iomap_page_create(inode, page); set_page_dirty(page); } @@ -1304,14 +1305,13 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct inode *inode, struct page *page, u64 end_offset) { - struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = iomap_page_create(inode, page); struct iomap_ioend *ioend, *next; unsigned len = i_blocksize(inode); u64 file_offset; /* file offset of page */ int error = 0, count = 0, i; LIST_HEAD(submit_list); - WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop); WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); /* diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c index dab1b02eba..ce6fb81085 100644 --- a/fs/iomap/seek.c +++ b/fs/iomap/seek.c @@ -35,23 +35,20 @@ loff_t iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops) { loff_t size = i_size_read(inode); - loff_t length = 
size - offset; loff_t ret; /* Nothing to be found before or beyond the end of the file. */ if (offset < 0 || offset >= size) return -ENXIO; - while (length > 0) { - ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, - &offset, iomap_seek_hole_actor); + while (offset < size) { + ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT, + ops, &offset, iomap_seek_hole_actor); if (ret < 0) return ret; if (ret == 0) break; - offset += ret; - length -= ret; } return offset; @@ -83,27 +80,23 @@ loff_t iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops) { loff_t size = i_size_read(inode); - loff_t length = size - offset; loff_t ret; /* Nothing to be found before or beyond the end of the file. */ if (offset < 0 || offset >= size) return -ENXIO; - while (length > 0) { - ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, - &offset, iomap_seek_data_actor); + while (offset < size) { + ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT, + ops, &offset, iomap_seek_data_actor); if (ret < 0) return ret; if (ret == 0) - break; - + return offset; offset += ret; - length -= ret; } - if (length <= 0) - return -ENXIO; - return offset; + /* We've reached the end of the file without finding data */ + return -ENXIO; } EXPORT_SYMBOL_GPL(iomap_seek_data); diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index eac6788fc6..c4769a9396 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry, } static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, - umode_t mode, int is_dir) + umode_t mode, bool is_dir, bool excl, u64 *handle_ret) { struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent); struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); @@ -261,10 +261,12 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, int err; params.handle = SHFL_HANDLE_NIL; - params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | - SHFL_CF_ACT_FAIL_IF_EXISTS | - SHFL_CF_ACCESS_READWRITE | - (is_dir ? SHFL_CF_DIRECTORY : 0); + params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE; + if (is_dir) + params.create_flags |= SHFL_CF_DIRECTORY; + if (excl) + params.create_flags |= SHFL_CF_ACT_FAIL_IF_EXISTS; + params.info.attr.mode = (mode & 0777) | (is_dir ? 
SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE); params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING; @@ -276,30 +278,81 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, if (params.result != SHFL_FILE_CREATED) return -EPERM; - vboxsf_close(sbi->root, params.handle); - err = vboxsf_dir_instantiate(parent, dentry, &params.info); if (err) - return err; + goto out; /* parent directory access/change time changed */ sf_parent_i->force_restat = 1; - return 0; +out: + if (err == 0 && handle_ret) + *handle_ret = params.handle; + else + vboxsf_close(sbi->root, params.handle); + + return err; } static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns, struct inode *parent, struct dentry *dentry, umode_t mode, bool excl) { - return vboxsf_dir_create(parent, dentry, mode, 0); + return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL); } static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns, struct inode *parent, struct dentry *dentry, umode_t mode) { - return vboxsf_dir_create(parent, dentry, mode, 1); + return vboxsf_dir_create(parent, dentry, mode, true, true, NULL); +} + +static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry, + struct file *file, unsigned int flags, umode_t mode) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + struct vboxsf_handle *sf_handle; + struct dentry *res = NULL; + u64 handle; + int err; + + if (d_in_lookup(dentry)) { + res = vboxsf_dir_lookup(parent, dentry, 0); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (res) + dentry = res; + } + + /* Only creates */ + if (!(flags & O_CREAT) || d_really_is_positive(dentry)) + return finish_no_open(file, res); + + err = vboxsf_dir_create(parent, dentry, mode, false, flags & O_EXCL, &handle); + if (err) + goto out; + + sf_handle = vboxsf_create_sf_handle(d_inode(dentry), handle, SHFL_CF_ACCESS_READWRITE); + if (IS_ERR(sf_handle)) { + vboxsf_close(sbi->root, handle); + err = PTR_ERR(sf_handle); + goto out; + } + + err = finish_open(file, dentry, generic_file_open); + if (err) { + /* This also closes the handle passed to vboxsf_create_sf_handle() */ + vboxsf_release_sf_handle(d_inode(dentry), sf_handle); + goto out; + } + + file->private_data = sf_handle; + file->f_mode |= FMODE_CREATED; +out: + dput(res); + return err; } static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry) @@ -422,6 +475,7 @@ const struct inode_operations vboxsf_dir_iops = { .lookup = vboxsf_dir_lookup, .create = vboxsf_dir_mkfile, .mkdir = vboxsf_dir_mkdir, + .atomic_open = vboxsf_dir_atomic_open, .rmdir = vboxsf_dir_unlink, .unlink = vboxsf_dir_unlink, .rename = vboxsf_dir_rename, diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c index c4ab5996d9..864c2fad23 100644 --- a/fs/vboxsf/file.c +++ b/fs/vboxsf/file.c @@ -20,18 +20,40 @@ struct vboxsf_handle { struct list_head head; }; -static int vboxsf_file_open(struct inode *inode, struct file *file) +struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode, + u64 handle, u32 access_flags) { struct vboxsf_inode *sf_i = VBOXSF_I(inode); + struct vboxsf_handle *sf_handle; + + sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL); + if (!sf_handle) + return ERR_PTR(-ENOMEM); + + /* the host may have given us different attr than requested */ + sf_i->force_restat = 1; + + /* init our handle struct and add it to the inode's handles list */ + sf_handle->handle = handle; + sf_handle->root = VBOXSF_SBI(inode->i_sb)->root; + sf_handle->access_flags = access_flags; + kref_init(&sf_handle->refcount); + +
mutex_lock(&sf_i->handle_list_mutex); + list_add(&sf_handle->head, &sf_i->handle_list); + mutex_unlock(&sf_i->handle_list_mutex); + + return sf_handle; +} + +static int vboxsf_file_open(struct inode *inode, struct file *file) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb); struct shfl_createparms params = {}; struct vboxsf_handle *sf_handle; u32 access_flags = 0; int err; - sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL); - if (!sf_handle) - return -ENOMEM; - /* * We check the value of params.handle afterwards to find out if * the call succeeded or failed, as the API does not seem to cleanly @@ -83,24 +105,15 @@ static int vboxsf_file_open(struct inode *inode, struct file *file) err = vboxsf_create_at_dentry(file_dentry(file), &params); if (err == 0 && params.handle == SHFL_HANDLE_NIL) err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT; - if (err) { - kfree(sf_handle); + if (err) return err; + + sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags); + if (IS_ERR(sf_handle)) { + vboxsf_close(sbi->root, params.handle); + return PTR_ERR(sf_handle); } - /* the host may have given us different attr then requested */ - sf_i->force_restat = 1; - - /* init our handle struct and add it to the inode's handles list */ - sf_handle->handle = params.handle; - sf_handle->root = VBOXSF_SBI(inode->i_sb)->root; - sf_handle->access_flags = access_flags; - kref_init(&sf_handle->refcount); - - mutex_lock(&sf_i->handle_list_mutex); - list_add(&sf_handle->head, &sf_i->handle_list); - mutex_unlock(&sf_i->handle_list_mutex); - file->private_data = sf_handle; return 0; } @@ -114,22 +127,26 @@ static void vboxsf_handle_release(struct kref *refcount) { kfree(sf_handle); } -static int vboxsf_file_release(struct inode *inode, struct file *file) +void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle) { struct vboxsf_inode *sf_i = VBOXSF_I(inode); - struct vboxsf_handle *sf_handle = file->private_data; - - /* - * When a file is closed on our (the guest) side, we want any subsequent - * accesses done on the host side to see all changes done from our side. - */ - filemap_write_and_wait(inode->i_mapping); mutex_lock(&sf_i->handle_list_mutex); list_del(&sf_handle->head); mutex_unlock(&sf_i->handle_list_mutex); kref_put(&sf_handle->refcount, vboxsf_handle_release); +} + +static int vboxsf_file_release(struct inode *inode, struct file *file) +{ + /* + * When a file is closed on our (the guest) side, we want any subsequent + * accesses done on the host side to see all changes done from our side.
+ */ + filemap_write_and_wait(inode->i_mapping); + + vboxsf_release_sf_handle(inode, file->private_data); return 0; } diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h index 6a7a9cedeb..9047befa66 100644 --- a/fs/vboxsf/vfsmod.h +++ b/fs/vboxsf/vfsmod.h @@ -18,6 +18,8 @@ #define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info) #define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode) +struct vboxsf_handle; + struct vboxsf_options { unsigned long ttl; kuid_t uid; @@ -80,6 +82,11 @@ extern const struct file_operations vboxsf_reg_fops; extern const struct address_space_operations vboxsf_reg_aops; extern const struct dentry_operations vboxsf_dentry_ops; +/* from file.c */ +struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode, + u64 handle, u32 access_flags); +void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle); + /* from utils.c */ struct inode *vboxsf_new_inode(struct super_block *sb); int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode, diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index 778ec52cce..ee9ec0c50b 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -803,6 +803,14 @@ xfs_ag_shrink_space( args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta); + /* + * Make sure that the last inode cluster cannot overlap with the new + * end of the AG, even if it's sparse. + */ + error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta); + if (error) + return error; + /* * Disable perag reservations so it doesn't cause the allocation request * to fail. We'll reestablish reservation before we return. diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index d9d7d5137b..191d517259 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -483,7 +483,7 @@ xfs_attr_set_iter( if (error) return error; - /* fallthrough */ + fallthrough; case XFS_DAS_RM_LBLK: /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */ dac->dela_state = XFS_DAS_RM_LBLK; @@ -496,7 +496,7 @@ xfs_attr_set_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_RD_LEAF: /* * This is the last step for leaf format. Read the block with @@ -528,7 +528,7 @@ xfs_attr_set_iter( return error; } - /* fallthrough */ + fallthrough; case XFS_DAS_ALLOC_NODE: /* * If there was an out-of-line value, allocate the blocks we @@ -590,7 +590,7 @@ xfs_attr_set_iter( if (error) return error; - /* fallthrough */ + fallthrough; case XFS_DAS_RM_NBLK: /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */ dac->dela_state = XFS_DAS_RM_NBLK; @@ -603,7 +603,7 @@ xfs_attr_set_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_CLR_FLAG: /* * The last state for node format. Look up the old attr and @@ -1406,7 +1406,7 @@ xfs_attr_remove_iter( state = dac->da_state; } - /* fallthrough */ + fallthrough; case XFS_DAS_RMTBLK: dac->dela_state = XFS_DAS_RMTBLK; @@ -1441,7 +1441,7 @@ xfs_attr_remove_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_RM_NAME: /* * If we came here fresh from a transaction roll, reattach all @@ -1469,7 +1469,7 @@ xfs_attr_remove_iter( return -EAGAIN; } - /* fallthrough */ + fallthrough; case XFS_DAS_RM_SHRINK: /* * If the result is small enough, push it all into the inode. 
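/*
 * Editor's note on the fs/xfs/libxfs/xfs_attr.c hunks directly above: the
 * old fallthrough comments are replaced with the kernel's "fallthrough"
 * pseudo-keyword, which compiles to a real statement attribute that
 * -Wimplicit-fallthrough can check, rather than a comment the compiler may
 * ignore. Below is a minimal, self-contained sketch of the idea;
 * next_state() is a made-up example, and outside the kernel tree (where
 * <linux/compiler_attributes.h> provides the macro) the definition here is
 * only an approximation.
 */
#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough	__attribute__((__fallthrough__))
#else
#define fallthrough	do {} while (0)	/* fallback: plain no-op */
#endif

static int next_state(int state)
{
	switch (state) {
	case 0:
		/* work for state 0 deliberately continues into state 1 */
		fallthrough;
	case 1:
		return state + 1;
	default:
		return -1;
	}
}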
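/*
 * Editor's note on the fs/hfs/bnode.c hunk earlier in this section:
 * hfs_bnode_read() used to kmap() only page[0], so a read whose range
 * crossed a page boundary walked off the end of that mapping; the fix
 * copies in per-page chunks. A self-contained sketch of the chunking
 * arithmetic follows, with made-up names (read_across_pages, npages) and
 * a fixed 4 KiB page standing in for PAGE_SIZE.
 */
#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Copy len bytes starting at byte offset off out of an array of pages. */
static int read_across_pages(char *pages[], int npages,
			     void *buf, unsigned long off, int len)
{
	int pagenum = off >> PAGE_SHIFT;	/* first page holding data */
	int done, chunk;

	off &= ~PAGE_MASK;			/* offset within that first page */
	for (done = 0; done < len; done += chunk) {
		if (pagenum >= npages)
			return -1;		/* range runs past the node */
		chunk = len - done;
		if (chunk > (int)(PAGE_SIZE - off))
			chunk = PAGE_SIZE - off;
		memcpy((char *)buf + done, pages[pagenum] + off, chunk);
		pagenum++;
		off = 0;	/* the page offset only applies to the first page */
	}
	return 0;
}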
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 57d9cb6329..aaf8805a82 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -2928,3 +2928,58 @@ xfs_ialloc_calc_rootino( return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); } + +/* + * Ensure there are not sparse inode clusters that cross the new EOAG. + * + * This is a no-op for non-spinode filesystems since clusters are always fully + * allocated and checking the bnobt suffices. However, a spinode filesystem + * could have a record where the upper inodes are free blocks. If those blocks + * were removed from the filesystem, the inode record would extend beyond EOAG, + * which will be flagged as corruption. + */ +int +xfs_ialloc_check_shrink( + struct xfs_trans *tp, + xfs_agnumber_t agno, + struct xfs_buf *agibp, + xfs_agblock_t new_length) +{ + struct xfs_inobt_rec_incore rec; + struct xfs_btree_cur *cur; + struct xfs_mount *mp = tp->t_mountp; + struct xfs_perag *pag; + xfs_agino_t agino = XFS_AGB_TO_AGINO(mp, new_length); + int has; + int error; + + if (!xfs_sb_version_hassparseinodes(&mp->m_sb)) + return 0; + + pag = xfs_perag_get(mp, agno); + cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO); + + /* Look up the inobt record that would correspond to the new EOFS. */ + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); + if (error || !has) + goto out; + + error = xfs_inobt_get_rec(cur, &rec, &has); + if (error) + goto out; + + if (!has) { + error = -EFSCORRUPTED; + goto out; + } + + /* If the record covers inodes that would be beyond EOFS, bail out. */ + if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { + error = -ENOSPC; + goto out; + } +out: + xfs_btree_del_cursor(cur, error); + xfs_perag_put(pag); + return error; +} diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h index 9df7c80408..9a2112b4ad 100644 --- a/fs/xfs/libxfs/xfs_ialloc.h +++ b/fs/xfs/libxfs/xfs_ialloc.h @@ -122,4 +122,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp); void xfs_ialloc_setup_geometry(struct xfs_mount *mp); xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit); +int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno, + struct xfs_buf *agibp, xfs_agblock_t new_length); + #endif /* __XFS_IALLOC_H__ */ diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 04ce361688..84ea2e0af9 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -592,23 +592,27 @@ xfs_inode_validate_extsize( /* * This comment describes a historic gap in this verifier function. * - * On older kernels, the extent size hint verifier doesn't check that - * the extent size hint is an integer multiple of the realtime extent - * size on a directory with both RTINHERIT and EXTSZINHERIT flags set. - * The verifier has always enforced the alignment rule for regular - * files with the REALTIME flag set. + * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this + * function has never checked that the extent size hint is an integer + * multiple of the realtime extent size. Since we allow users to set + * this combination on non-rt filesystems /and/ to change the rt + * extent size when adding a rt device to a filesystem, the net effect + * is that users can configure a filesystem anticipating one rt + * geometry and change their minds later. Directories do not use the + * extent size hint, so this is harmless for them. 
* * If a directory with a misaligned extent size hint is allowed to * propagate that hint into a new regular realtime file, the result * is that the inode cluster buffer verifier will trigger a corruption - * shutdown the next time it is run. + * shutdown the next time it is run, because the verifier has always + * enforced the alignment rule for regular files. * - * Unfortunately, there could be filesystems with these misconfigured - * directories in the wild, so we cannot add a check to this verifier - * at this time because that will result a new source of directory - * corruption errors when reading an existing filesystem. Instead, we - * permit the misconfiguration to pass through the verifiers so that - * callers of this function can correct and mitigate externally. + * Because we allow administrators to set a new rt extent size when + * adding a rt section, we cannot add a check to this verifier because + * that will result in a new source of directory corruption errors when + * reading an existing filesystem. Instead, we rely on callers to + * decide when alignment checks are appropriate, and fix things up as + * needed. */ if (rt_flag) diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index 8d595a5c4a..16f723ebe8 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -143,16 +143,14 @@ xfs_trans_log_inode( } /* - * Inode verifiers on older kernels don't check that the extent size - * hint is an integer multiple of the rt extent size on a directory - * with both rtinherit and extszinherit flags set. If we're logging a - * directory that is misconfigured in this way, clear the hint. + * Inode verifiers do not check that the extent size hint is an integer + * multiple of the rt extent size on a directory with both rtinherit + * and extszinherit flags set. If we're logging a directory that is + * misconfigured in this way, clear the hint. */ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) && (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) { - xfs_info_once(ip->i_mount, - "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino); ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT); ip->i_extsize = 0; diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c index 61f90b2c94..76fbc7ca4c 100644 --- a/fs/xfs/scrub/inode.c +++ b/fs/xfs/scrub/inode.c @@ -73,11 +73,25 @@ xchk_inode_extsize( uint16_t flags) { xfs_failaddr_t fa; + uint32_t value = be32_to_cpu(dip->di_extsize); - fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize), - mode, flags); + fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags); if (fa) xchk_ino_set_corrupt(sc, ino); + + /* + * XFS allows a sysadmin to change the rt extent size when adding a rt + * section to a filesystem after formatting. If there are any + * directories with extszinherit and rtinherit set, the hint could + * become misaligned with the new rextsize. The verifier doesn't check + * this, because we allow rtinherit directories even without an rt + * device. Flag this as an administrative warning since we will clean + * this up eventually.
+ */ + if ((flags & XFS_DIFLAG_RTINHERIT) && + (flags & XFS_DIFLAG_EXTSZINHERIT) && + value % sc->mp->m_sb.sb_rextsize > 0) + xchk_ino_set_warning(sc, ino); } /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index a835ceb79b..990b72ae36 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2763,6 +2763,19 @@ xfs_remove( error = xfs_droplink(tp, ip); if (error) goto out_trans_cancel; + + /* + * Point the unlinked child directory's ".." entry to the root + * directory to eliminate back-references to inodes that may + * get freed before the child directory is closed. If the fs + * gets shrunk, this can lead to dirent inode validation errors. + */ + if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) { + error = xfs_dir_replace(tp, ip, &xfs_name_dotdot, + tp->t_mountp->m_sb.sb_rootino, 0); + if (error) + return error; + } } else { /* * When removing a non-directory we need to log the parent diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 65270e63c0..16039ea10a 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1065,7 +1065,24 @@ xfs_fill_fsxattr( fileattr_fill_xflags(fa, xfs_ip2xflags(ip)); - fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); + if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) { + fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); + } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) { + /* + * Don't let a misaligned extent size hint on a directory + * escape to userspace if it won't pass the setattr checks + * later. + */ + if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && + ip->i_extsize % mp->m_sb.sb_rextsize > 0) { + fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | + FS_XFLAG_EXTSZINHERIT); + fa->fsx_extsize = 0; + } else { + fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize); + } + } + if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize); fa->fsx_projid = ip->i_projid; @@ -1292,10 +1309,10 @@ xfs_ioctl_setattr_check_extsize( new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags); /* - * Inode verifiers on older kernels don't check that the extent size - * hint is an integer multiple of the rt extent size on a directory - * with both rtinherit and extszinherit flags set. Don't let sysadmins - * misconfigure directories. + * Inode verifiers do not check that the extent size hint is an integer + * multiple of the rt extent size on a directory with both rtinherit + * and extszinherit flags set. Don't let sysadmins misconfigure + * directories. */ if ((new_diflags & XFS_DIFLAG_RTINHERIT) && (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) { diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 4e7be6b4ca..699066fb90 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -923,16 +923,41 @@ xfs_growfs_rt( uint8_t *rsum_cache; /* old summary cache */ sbp = &mp->m_sb; - /* - * Initial error checking. - */ + if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || - (nrblocks = in->newblocks) <= sbp->sb_rblocks || - (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) + + /* Needs to have been mounted with an rt device. */ + if (!XFS_IS_REALTIME_MOUNT(mp)) return -EINVAL; - if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) + /* + * Mount should fail if the rt bitmap/summary files don't load, but + * we'll check anyway. + */ + if (!mp->m_rbmip || !mp->m_rsumip) + return -EINVAL; + + /* Shrink not supported. */ + if (in->newblocks <= sbp->sb_rblocks) + return -EINVAL; + + /* Can only change rt extent size when adding rt volume. 
*/ + if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize) + return -EINVAL; + + /* Range check the extent size. */ + if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE || + XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE) + return -EINVAL; + + /* Unsupported realtime features. */ + if (xfs_sb_version_hasrmapbt(&mp->m_sb) || + xfs_sb_version_hasreflink(&mp->m_sb)) + return -EOPNOTSUPP; + + nrblocks = in->newblocks; + error = xfs_sb_validate_fsb_count(sbp, nrblocks); + if (error) return error; /* * Read in the last block of the device, make sure it exists. @@ -996,7 +1021,8 @@ xfs_growfs_rt( ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0); bmbno < nrbmblocks; bmbno++) { - xfs_trans_t *tp; + struct xfs_trans *tp; + xfs_rfsblock_t nrblocks_step; *nmp = *mp; nsbp = &nmp->m_sb; @@ -1005,10 +1031,9 @@ xfs_growfs_rt( */ nsbp->sb_rextsize = in->extsize; nsbp->sb_rbmblocks = bmbno + 1; - nsbp->sb_rblocks = - XFS_RTMIN(nrblocks, - nsbp->sb_rbmblocks * NBBY * - nsbp->sb_blocksize * nsbp->sb_rextsize); + nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize * + nsbp->sb_rextsize; + nsbp->sb_rblocks = min(nrblocks, nrblocks_step); nsbp->sb_rextents = nsbp->sb_rblocks; do_div(nsbp->sb_rextents, nsbp->sb_rextsize); ASSERT(nsbp->sb_rextents != 0); diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index dbf0363586..70055d486b 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -705,9 +705,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) return 0; bio = bio_alloc(GFP_NOFS, nr_pages); - if (!bio) - return -ENOMEM; - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = zi->i_zsector; bio->bi_write_hint = iocb->ki_hint; diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h index 18197c1614..3e8d969b22 100644 --- a/include/acpi/acbuffer.h +++ b/include/acpi/acbuffer.h @@ -207,4 +207,14 @@ struct acpi_pld_info { #define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) #define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */ +/* Panel position defined in _PLD section of ACPI Specification 6.3 */ + +#define ACPI_PLD_PANEL_TOP 0 +#define ACPI_PLD_PANEL_BOTTOM 1 +#define ACPI_PLD_PANEL_LEFT 2 +#define ACPI_PLD_PANEL_RIGHT 3 +#define ACPI_PLD_PANEL_FRONT 4 +#define ACPI_PLD_PANEL_BACK 5 +#define ACPI_PLD_PANEL_UNKNOWN 6 + #endif /* ACBUFFER_H */ diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index e92f84fa8c..0362cbb723 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -188,6 +188,8 @@ #define ACPI_MAX_GSBUS_DATA_SIZE 255 #define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE +#define ACPI_PRM_INPUT_BUFFER_SIZE 26 + /* _sx_d and _sx_w control methods */ #define ACPI_NUM_sx_d_METHODS 4 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 3a82faac57..1ae993fee4 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -280,6 +280,12 @@ struct acpi_device_power { struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ }; +struct acpi_dep_data { + struct list_head node; + acpi_handle supplier; + acpi_handle consumer; +}; + /* Performance Management */ struct acpi_device_perf_flags { @@ -498,8 +504,6 @@ extern int unregister_acpi_notifier(struct notifier_block *); */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); -struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); -void 
acpi_bus_put_acpi_device(struct acpi_device *adev); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); @@ -586,8 +590,11 @@ struct acpi_pci_root { /* helper */ -bool acpi_dma_supported(struct acpi_device *adev); +bool acpi_dma_supported(const struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); +int acpi_iommu_fwspec_init(struct device *dev, u32 id, + struct fwnode_handle *fwnode, + const struct iommu_ops *ops); int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, u64 *size); int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, @@ -685,6 +692,8 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2); +void acpi_dev_clear_dependencies(struct acpi_device *supplier); +struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier); struct acpi_device * acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv); struct acpi_device * @@ -718,6 +727,13 @@ static inline void acpi_dev_put(struct acpi_device *adev) { put_device(&adev->dev); } + +struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); + +static inline void acpi_bus_put_acpi_device(struct acpi_device *adev) +{ + acpi_dev_put(adev); +} #else /* CONFIG_ACPI */ static inline int register_acpi_bus_type(void *bus) { return 0; } diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index 40a91ce87e..68e4d80c1b 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -43,4 +43,4 @@ static inline void disable_hmat(void) { } #endif /* CONFIG_ACPI_HMAT */ -#endif /* __ACP_NUMA_H */ +#endif /* __ACPI_NUMA_H */ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index ce59903c26..ef2872dea0 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -327,9 +327,20 @@ struct acpi_cedt_header { enum acpi_cedt_type { ACPI_CEDT_TYPE_CHBS = 0, - ACPI_CEDT_TYPE_RESERVED = 1 + ACPI_CEDT_TYPE_CFMWS = 1, + ACPI_CEDT_TYPE_RESERVED = 2, }; +/* Values for version field above */ + +#define ACPI_CEDT_CHBS_VERSION_CXL11 (0) +#define ACPI_CEDT_CHBS_VERSION_CXL20 (1) + +/* Values for length field above */ + +#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000) +#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000) + /* * CEDT subtables */ @@ -345,6 +356,34 @@ struct acpi_cedt_chbs { u64 length; }; +/* 1: CXL Fixed Memory Window Structure */ + +struct acpi_cedt_cfmws { + struct acpi_cedt_header header; + u32 reserved1; + u64 base_hpa; + u64 window_size; + u8 interleave_ways; + u8 interleave_arithmetic; + u16 reserved2; + u32 granularity; + u16 restrictions; + u16 qtg_id; + u32 interleave_targets[]; +}; + +/* Values for Interleave Arithmetic field above */ + +#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0) + +/* Values for Restrictions field above */ + +#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1) +#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1) +#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2) +#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3) +#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4) + /******************************************************************************* * * CPEP - Corrected Platform Error Polling table (ACPI 4.0) diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 18cafe3ebd..2069ac38a4 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -24,6 +24,7 @@ * file. 
Useful because they make it more difficult to inadvertently type in * the wrong signature. */ +#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */ #define ACPI_SIG_IORT "IORT" /* IO Remapping Table */ #define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ #define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */ @@ -39,11 +40,14 @@ #define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */ #define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ #define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ +#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */ #define ACPI_SIG_RASF "RASF" /* RAS Feature table */ +#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ #define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ #define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ #define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */ +#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -63,6 +67,20 @@ * See http://stackoverflow.com/a/1053662/41661 */ +/******************************************************************************* + * + * BDAT - BIOS Data ACPI Table + * + * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5 + * Nov 2020 + * + ******************************************************************************/ + +struct acpi_table_bdat { + struct acpi_table_header header; + struct acpi_generic_address gas; +}; + /******************************************************************************* * * IORT - IO Remapping Table @@ -446,6 +464,12 @@ struct acpi_ivrs_device_hid { u8 uid_length; }; +/* Values for uid_type above */ + +#define ACPI_IVRS_UID_NOT_PRESENT 0 +#define ACPI_IVRS_UID_IS_INTEGER 1 +#define ACPI_IVRS_UID_IS_STRING 2 + /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ struct acpi_ivrs_memory { @@ -763,6 +787,20 @@ struct acpi_madt_multiproc_wakeup { u64 base_address; }; +#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032 +#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048 + +struct acpi_madt_multiproc_wakeup_mailbox { + u16 command; + u16 reserved; /* reserved - must be zero */ + u32 apic_id; + u64 wakeup_vector; + u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */ + u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */ +}; + +#define ACPI_MP_WAKE_COMMAND_WAKEUP 1 + /* * Common flags fields for MADT subtables */ @@ -1673,6 +1711,48 @@ struct acpi_pptt_id { u16 spin_rev; }; +/******************************************************************************* + * + * PRMT - Platform Runtime Mechanism Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_prmt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +struct acpi_table_prmt_header { + u8 platform_guid[16]; + u32 module_info_offset; + u32 module_info_count; +}; + +struct acpi_prmt_module_header { + u16 revision; + u16 length; +}; + +struct acpi_prmt_module_info { + u16 revision; + u16 length; + u8 module_guid[16]; + u16 major_rev; + u16 minor_rev; + u16 handler_info_count; + u32 handler_info_offset; + u64 mmio_list_pointer; +}; + +struct acpi_prmt_handler_info { + u16 revision; + u16 length; + u8 handler_guid[16]; + u64 handler_address; + u64 
static_data_buffer_address; + u64 acpi_param_buffer_address; +}; + /******************************************************************************* * * RASF - RAS Feature Table (ACPI 5.0) @@ -1769,6 +1849,32 @@ enum acpi_rasf_status { #define ACPI_RASF_ERROR (1<<2) #define ACPI_RASF_STATUS (0x1F<<3) +/******************************************************************************* + * + * RGRT - Regulatory Graphics Resource Table + * Version 1 + * + * Conforms to "ACPI RGRT" available at: + * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/ + * + ******************************************************************************/ + +struct acpi_table_rgrt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 version; + u8 image_type; + u8 reserved; + u8 image[0]; +}; + +/* image_type values */ + +enum acpi_rgrt_image_type { + ACPI_RGRT_TYPE_RESERVED0 = 0, + ACPI_RGRT_IMAGE_TYPE_PNG = 1, + ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + /******************************************************************************* * * SBST - Smart Battery Specification Table @@ -1899,6 +2005,37 @@ struct acpi_sdev_pcie_path { u8 function; }; +/******************************************************************************* + * + * SVKL - Storage Volume Key Location Table (ACPI 6.4) + * From: "Guest-Host-Communication Interface (GHCI) for Intel + * Trust Domain Extensions (Intel TDX)". + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_svkl { + struct acpi_table_header header; /* Common ACPI table header */ + u32 count; +}; + +struct acpi_svkl_key { + u16 type; + u16 format; + u32 size; + u64 address; +}; + +enum acpi_svkl_type { + ACPI_SVKL_TYPE_MAIN_STORAGE = 0, + ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */ +}; + +enum acpi_svkl_format { + ACPI_SVKL_FORMAT_RAW_BINARY = 0, + ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */ +}; + /* Reset to default packing */ #pragma pack() diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 888b6cfeed..bc45af52c9 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -27,17 +27,13 @@ atomic_read(const atomic_t *v) instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read(v); } -#define atomic_read atomic_read -#if defined(arch_atomic_read_acquire) static __always_inline int atomic_read_acquire(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } -#define atomic_read_acquire atomic_read_acquire -#endif static __always_inline void atomic_set(atomic_t *v, int i) @@ -45,17 +41,13 @@ atomic_set(atomic_t *v, int i) instrument_atomic_write(v, sizeof(*v)); arch_atomic_set(v, i); } -#define atomic_set atomic_set -#if defined(arch_atomic_set_release) static __always_inline void atomic_set_release(atomic_t *v, int i) { instrument_atomic_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } -#define atomic_set_release atomic_set_release -#endif static __always_inline void atomic_add(int i, atomic_t *v) @@ -63,87 +55,62 @@ atomic_add(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_add(i, v); } -#define atomic_add atomic_add -#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) static __always_inline int atomic_add_return(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return 
arch_atomic_add_return(i, v); } -#define atomic_add_return atomic_add_return -#endif -#if defined(arch_atomic_add_return_acquire) static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } -#define atomic_add_return_acquire atomic_add_return_acquire -#endif -#if defined(arch_atomic_add_return_release) static __always_inline int atomic_add_return_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } -#define atomic_add_return_release atomic_add_return_release -#endif -#if defined(arch_atomic_add_return_relaxed) static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } -#define atomic_add_return_relaxed atomic_add_return_relaxed -#endif -#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) static __always_inline int atomic_fetch_add(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } -#define atomic_fetch_add atomic_fetch_add -#endif -#if defined(arch_atomic_fetch_add_acquire) static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#endif -#if defined(arch_atomic_fetch_add_release) static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } -#define atomic_fetch_add_release atomic_fetch_add_release -#endif -#if defined(arch_atomic_fetch_add_relaxed) static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } -#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed -#endif static __always_inline void atomic_sub(int i, atomic_t *v) @@ -151,267 +118,188 @@ atomic_sub(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_sub(i, v); } -#define atomic_sub atomic_sub -#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) static __always_inline int atomic_sub_return(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } -#define atomic_sub_return atomic_sub_return -#endif -#if defined(arch_atomic_sub_return_acquire) static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } -#define atomic_sub_return_acquire atomic_sub_return_acquire -#endif -#if defined(arch_atomic_sub_return_release) static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } -#define atomic_sub_return_release atomic_sub_return_release -#endif -#if defined(arch_atomic_sub_return_relaxed) static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } -#define atomic_sub_return_relaxed atomic_sub_return_relaxed -#endif -#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { instrument_atomic_read_write(v, 
sizeof(*v)); return arch_atomic_fetch_sub(i, v); } -#define atomic_fetch_sub atomic_fetch_sub -#endif -#if defined(arch_atomic_fetch_sub_acquire) static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#endif -#if defined(arch_atomic_fetch_sub_release) static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } -#define atomic_fetch_sub_release atomic_fetch_sub_release -#endif -#if defined(arch_atomic_fetch_sub_relaxed) static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } -#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed -#endif -#if defined(arch_atomic_inc) static __always_inline void atomic_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_inc(v); } -#define atomic_inc atomic_inc -#endif -#if defined(arch_atomic_inc_return) static __always_inline int atomic_inc_return(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } -#define atomic_inc_return atomic_inc_return -#endif -#if defined(arch_atomic_inc_return_acquire) static __always_inline int atomic_inc_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } -#define atomic_inc_return_acquire atomic_inc_return_acquire -#endif -#if defined(arch_atomic_inc_return_release) static __always_inline int atomic_inc_return_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } -#define atomic_inc_return_release atomic_inc_return_release -#endif -#if defined(arch_atomic_inc_return_relaxed) static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } -#define atomic_inc_return_relaxed atomic_inc_return_relaxed -#endif -#if defined(arch_atomic_fetch_inc) static __always_inline int atomic_fetch_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } -#define atomic_fetch_inc atomic_fetch_inc -#endif -#if defined(arch_atomic_fetch_inc_acquire) static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif -#if defined(arch_atomic_fetch_inc_release) static __always_inline int atomic_fetch_inc_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif -#if defined(arch_atomic_fetch_inc_relaxed) static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } -#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed -#endif -#if defined(arch_atomic_dec) static __always_inline void atomic_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_dec(v); } -#define atomic_dec atomic_dec -#endif -#if defined(arch_atomic_dec_return) static __always_inline int atomic_dec_return(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return(v); 
} -#define atomic_dec_return atomic_dec_return -#endif -#if defined(arch_atomic_dec_return_acquire) static __always_inline int atomic_dec_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif -#if defined(arch_atomic_dec_return_release) static __always_inline int atomic_dec_return_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } -#define atomic_dec_return_release atomic_dec_return_release -#endif -#if defined(arch_atomic_dec_return_relaxed) static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } -#define atomic_dec_return_relaxed atomic_dec_return_relaxed -#endif -#if defined(arch_atomic_fetch_dec) static __always_inline int atomic_fetch_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } -#define atomic_fetch_dec atomic_fetch_dec -#endif -#if defined(arch_atomic_fetch_dec_acquire) static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif -#if defined(arch_atomic_fetch_dec_release) static __always_inline int atomic_fetch_dec_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif -#if defined(arch_atomic_fetch_dec_relaxed) static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } -#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed -#endif static __always_inline void atomic_and(int i, atomic_t *v) @@ -419,97 +307,69 @@ atomic_and(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_and(i, v); } -#define atomic_and atomic_and -#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) static __always_inline int atomic_fetch_and(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } -#define atomic_fetch_and atomic_fetch_and -#endif -#if defined(arch_atomic_fetch_and_acquire) static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#endif -#if defined(arch_atomic_fetch_and_release) static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } -#define atomic_fetch_and_release atomic_fetch_and_release -#endif -#if defined(arch_atomic_fetch_and_relaxed) static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } -#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed -#endif -#if defined(arch_atomic_andnot) static __always_inline void atomic_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } -#define atomic_andnot atomic_andnot -#endif -#if defined(arch_atomic_fetch_andnot) static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { 
instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } -#define atomic_fetch_andnot atomic_fetch_andnot -#endif -#if defined(arch_atomic_fetch_andnot_acquire) static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif -#if defined(arch_atomic_fetch_andnot_release) static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif -#if defined(arch_atomic_fetch_andnot_relaxed) static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#endif static __always_inline void atomic_or(int i, atomic_t *v) @@ -517,47 +377,34 @@ atomic_or(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_or(i, v); } -#define atomic_or atomic_or -#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) static __always_inline int atomic_fetch_or(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } -#define atomic_fetch_or atomic_fetch_or -#endif -#if defined(arch_atomic_fetch_or_acquire) static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#endif -#if defined(arch_atomic_fetch_or_release) static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } -#define atomic_fetch_or_release atomic_fetch_or_release -#endif -#if defined(arch_atomic_fetch_or_relaxed) static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } -#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed -#endif static __always_inline void atomic_xor(int i, atomic_t *v) @@ -565,129 +412,91 @@ atomic_xor(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_xor(i, v); } -#define atomic_xor atomic_xor -#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } -#define atomic_fetch_xor atomic_fetch_xor -#endif -#if defined(arch_atomic_fetch_xor_acquire) static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#endif -#if defined(arch_atomic_fetch_xor_release) static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } -#define atomic_fetch_xor_release atomic_fetch_xor_release -#endif -#if defined(arch_atomic_fetch_xor_relaxed) static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return 
arch_atomic_fetch_xor_relaxed(i, v); } -#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed -#endif -#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) static __always_inline int atomic_xchg(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } -#define atomic_xchg atomic_xchg -#endif -#if defined(arch_atomic_xchg_acquire) static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } -#define atomic_xchg_acquire atomic_xchg_acquire -#endif -#if defined(arch_atomic_xchg_release) static __always_inline int atomic_xchg_release(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } -#define atomic_xchg_release atomic_xchg_release -#endif -#if defined(arch_atomic_xchg_relaxed) static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } -#define atomic_xchg_relaxed atomic_xchg_relaxed -#endif -#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } -#define atomic_cmpxchg atomic_cmpxchg -#endif -#if defined(arch_atomic_cmpxchg_acquire) static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } -#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#endif -#if defined(arch_atomic_cmpxchg_release) static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } -#define atomic_cmpxchg_release atomic_cmpxchg_release -#endif -#if defined(arch_atomic_cmpxchg_relaxed) static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } -#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed -#endif -#if defined(arch_atomic_try_cmpxchg) static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { @@ -695,10 +504,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif -#if defined(arch_atomic_try_cmpxchg_acquire) static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { @@ -706,10 +512,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif -#if defined(arch_atomic_try_cmpxchg_release) static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { @@ -717,10 +520,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif -#if defined(arch_atomic_try_cmpxchg_relaxed) static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { @@ -728,108 +528,76 @@ 
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed -#endif -#if defined(arch_atomic_sub_and_test) static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } -#define atomic_sub_and_test atomic_sub_and_test -#endif -#if defined(arch_atomic_dec_and_test) static __always_inline bool atomic_dec_and_test(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } -#define atomic_dec_and_test atomic_dec_and_test -#endif -#if defined(arch_atomic_inc_and_test) static __always_inline bool atomic_inc_and_test(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } -#define atomic_inc_and_test atomic_inc_and_test -#endif -#if defined(arch_atomic_add_negative) static __always_inline bool atomic_add_negative(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } -#define atomic_add_negative atomic_add_negative -#endif -#if defined(arch_atomic_fetch_add_unless) static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } -#define atomic_fetch_add_unless atomic_fetch_add_unless -#endif -#if defined(arch_atomic_add_unless) static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } -#define atomic_add_unless atomic_add_unless -#endif -#if defined(arch_atomic_inc_not_zero) static __always_inline bool atomic_inc_not_zero(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } -#define atomic_inc_not_zero atomic_inc_not_zero -#endif -#if defined(arch_atomic_inc_unless_negative) static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } -#define atomic_inc_unless_negative atomic_inc_unless_negative -#endif -#if defined(arch_atomic_dec_unless_positive) static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } -#define atomic_dec_unless_positive atomic_dec_unless_positive -#endif -#if defined(arch_atomic_dec_if_positive) static __always_inline int atomic_dec_if_positive(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } -#define atomic_dec_if_positive atomic_dec_if_positive -#endif static __always_inline s64 atomic64_read(const atomic64_t *v) @@ -837,17 +605,13 @@ atomic64_read(const atomic64_t *v) instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read(v); } -#define atomic64_read atomic64_read -#if defined(arch_atomic64_read_acquire) static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } -#define atomic64_read_acquire atomic64_read_acquire -#endif static __always_inline void atomic64_set(atomic64_t *v, s64 i) @@ -855,17 +619,13 @@ atomic64_set(atomic64_t *v, s64 i) instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set(v, i); } -#define atomic64_set atomic64_set -#if defined(arch_atomic64_set_release) 
static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } -#define atomic64_set_release atomic64_set_release -#endif static __always_inline void atomic64_add(s64 i, atomic64_t *v) @@ -873,87 +633,62 @@ atomic64_add(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_add(i, v); } -#define atomic64_add atomic64_add -#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } -#define atomic64_add_return atomic64_add_return -#endif -#if defined(arch_atomic64_add_return_acquire) static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } -#define atomic64_add_return_acquire atomic64_add_return_acquire -#endif -#if defined(arch_atomic64_add_return_release) static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } -#define atomic64_add_return_release atomic64_add_return_release -#endif -#if defined(arch_atomic64_add_return_relaxed) static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } -#define atomic64_add_return_relaxed atomic64_add_return_relaxed -#endif -#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } -#define atomic64_fetch_add atomic64_fetch_add -#endif -#if defined(arch_atomic64_fetch_add_acquire) static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#endif -#if defined(arch_atomic64_fetch_add_release) static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } -#define atomic64_fetch_add_release atomic64_fetch_add_release -#endif -#if defined(arch_atomic64_fetch_add_relaxed) static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } -#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed -#endif static __always_inline void atomic64_sub(s64 i, atomic64_t *v) @@ -961,267 +696,188 @@ atomic64_sub(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } -#define atomic64_sub atomic64_sub -#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } -#define atomic64_sub_return atomic64_sub_return -#endif -#if defined(arch_atomic64_sub_return_acquire) static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } -#define atomic64_sub_return_acquire 
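The _acquire/_release variants instrumented above let callers pair a publishing store with a consuming load without paying for a full barrier. A sketch under assumed producer/consumer roles (publish() and try_consume() are hypothetical):

	/* Producer: the payload store is ordered before the release increment. */
	static void publish(atomic64_t *seq, u64 *payload, u64 val)
	{
		*payload = val;
		atomic64_fetch_add_release(1, seq);
	}

	/* Consumer: the acquire read keeps the payload load after it. */
	static bool try_consume(atomic64_t *seq, const u64 *payload, s64 want, u64 *out)
	{
		if (atomic64_read_acquire(seq) < want)
			return false;
		*out = *payload;
		return true;
	}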
atomic64_sub_return_acquire -#endif -#if defined(arch_atomic64_sub_return_release) static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } -#define atomic64_sub_return_release atomic64_sub_return_release -#endif -#if defined(arch_atomic64_sub_return_relaxed) static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } -#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed -#endif -#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } -#define atomic64_fetch_sub atomic64_fetch_sub -#endif -#if defined(arch_atomic64_fetch_sub_acquire) static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#endif -#if defined(arch_atomic64_fetch_sub_release) static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#endif -#if defined(arch_atomic64_fetch_sub_relaxed) static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed -#endif -#if defined(arch_atomic64_inc) static __always_inline void atomic64_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_inc(v); } -#define atomic64_inc atomic64_inc -#endif -#if defined(arch_atomic64_inc_return) static __always_inline s64 atomic64_inc_return(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } -#define atomic64_inc_return atomic64_inc_return -#endif -#if defined(arch_atomic64_inc_return_acquire) static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif -#if defined(arch_atomic64_inc_return_release) static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } -#define atomic64_inc_return_release atomic64_inc_return_release -#endif -#if defined(arch_atomic64_inc_return_relaxed) static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } -#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed -#endif -#if defined(arch_atomic64_fetch_inc) static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } -#define atomic64_fetch_inc atomic64_fetch_inc -#endif -#if defined(arch_atomic64_fetch_inc_acquire) static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } -#define atomic64_fetch_inc_acquire 
atomic64_fetch_inc_acquire -#endif -#if defined(arch_atomic64_fetch_inc_release) static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif -#if defined(arch_atomic64_fetch_inc_relaxed) static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed -#endif -#if defined(arch_atomic64_dec) static __always_inline void atomic64_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_dec(v); } -#define atomic64_dec atomic64_dec -#endif -#if defined(arch_atomic64_dec_return) static __always_inline s64 atomic64_dec_return(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } -#define atomic64_dec_return atomic64_dec_return -#endif -#if defined(arch_atomic64_dec_return_acquire) static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } -#define atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif -#if defined(arch_atomic64_dec_return_release) static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } -#define atomic64_dec_return_release atomic64_dec_return_release -#endif -#if defined(arch_atomic64_dec_return_relaxed) static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } -#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#endif -#if defined(arch_atomic64_fetch_dec) static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } -#define atomic64_fetch_dec atomic64_fetch_dec -#endif -#if defined(arch_atomic64_fetch_dec_acquire) static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif -#if defined(arch_atomic64_fetch_dec_release) static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif -#if defined(arch_atomic64_fetch_dec_relaxed) static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed -#endif static __always_inline void atomic64_and(s64 i, atomic64_t *v) @@ -1229,97 +885,69 @@ atomic64_and(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_and(i, v); } -#define atomic64_and atomic64_and -#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } -#define atomic64_fetch_and atomic64_fetch_and -#endif -#if defined(arch_atomic64_fetch_and_acquire) static __always_inline s64 
atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#endif -#if defined(arch_atomic64_fetch_and_release) static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } -#define atomic64_fetch_and_release atomic64_fetch_and_release -#endif -#if defined(arch_atomic64_fetch_and_relaxed) static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } -#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed -#endif -#if defined(arch_atomic64_andnot) static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } -#define atomic64_andnot atomic64_andnot -#endif -#if defined(arch_atomic64_fetch_andnot) static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } -#define atomic64_fetch_andnot atomic64_fetch_andnot -#endif -#if defined(arch_atomic64_fetch_andnot_acquire) static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif -#if defined(arch_atomic64_fetch_andnot_release) static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif -#if defined(arch_atomic64_fetch_andnot_relaxed) static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#endif static __always_inline void atomic64_or(s64 i, atomic64_t *v) @@ -1327,47 +955,34 @@ atomic64_or(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_or(i, v); } -#define atomic64_or atomic64_or -#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } -#define atomic64_fetch_or atomic64_fetch_or -#endif -#if defined(arch_atomic64_fetch_or_acquire) static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_acquire(i, v); } -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#endif -#if defined(arch_atomic64_fetch_or_release) static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } -#define atomic64_fetch_or_release atomic64_fetch_or_release -#endif -#if defined(arch_atomic64_fetch_or_relaxed) static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } -#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed -#endif 
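The fetch_andnot() operations above atomically clear bits while returning the prior value, which yields a lock-free test-and-clear. Illustrative sketch (the flag layout is hypothetical):

	static bool test_and_clear_pending(atomic64_t *flags)
	{
		const s64 pending = 1;	/* hypothetical flag bit */

		/* flags &= ~pending; report whether it had been set */
		return atomic64_fetch_andnot(pending, flags) & pending;
	}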
static __always_inline void atomic64_xor(s64 i, atomic64_t *v) @@ -1375,129 +990,91 @@ atomic64_xor(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } -#define atomic64_xor atomic64_xor -#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } -#define atomic64_fetch_xor atomic64_fetch_xor -#endif -#if defined(arch_atomic64_fetch_xor_acquire) static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#endif -#if defined(arch_atomic64_fetch_xor_release) static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#endif -#if defined(arch_atomic64_fetch_xor_relaxed) static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed -#endif -#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } -#define atomic64_xchg atomic64_xchg -#endif -#if defined(arch_atomic64_xchg_acquire) static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } -#define atomic64_xchg_acquire atomic64_xchg_acquire -#endif -#if defined(arch_atomic64_xchg_release) static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } -#define atomic64_xchg_release atomic64_xchg_release -#endif -#if defined(arch_atomic64_xchg_relaxed) static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } -#define atomic64_xchg_relaxed atomic64_xchg_relaxed -#endif -#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } -#define atomic64_cmpxchg atomic64_cmpxchg -#endif -#if defined(arch_atomic64_cmpxchg_acquire) static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } -#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#endif -#if defined(arch_atomic64_cmpxchg_release) static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } -#define atomic64_cmpxchg_release atomic64_cmpxchg_release -#endif -#if defined(arch_atomic64_cmpxchg_relaxed) static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, 
new); } -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed -#endif -#if defined(arch_atomic64_try_cmpxchg) static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { @@ -1505,10 +1082,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif -#if defined(arch_atomic64_try_cmpxchg_acquire) static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { @@ -1516,10 +1090,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#endif -#if defined(arch_atomic64_try_cmpxchg_release) static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { @@ -1527,10 +1098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#endif -#if defined(arch_atomic64_try_cmpxchg_relaxed) static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { @@ -1538,218 +1106,161 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed -#endif -#if defined(arch_atomic64_sub_and_test) static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } -#define atomic64_sub_and_test atomic64_sub_and_test -#endif -#if defined(arch_atomic64_dec_and_test) static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } -#define atomic64_dec_and_test atomic64_dec_and_test -#endif -#if defined(arch_atomic64_inc_and_test) static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } -#define atomic64_inc_and_test atomic64_inc_and_test -#endif -#if defined(arch_atomic64_add_negative) static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } -#define atomic64_add_negative atomic64_add_negative -#endif -#if defined(arch_atomic64_fetch_add_unless) static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } -#define atomic64_fetch_add_unless atomic64_fetch_add_unless -#endif -#if defined(arch_atomic64_add_unless) static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } -#define atomic64_add_unless atomic64_add_unless -#endif -#if defined(arch_atomic64_inc_not_zero) static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } -#define atomic64_inc_not_zero atomic64_inc_not_zero -#endif -#if 
defined(arch_atomic64_inc_unless_negative) static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } -#define atomic64_inc_unless_negative atomic64_inc_unless_negative -#endif -#if defined(arch_atomic64_dec_unless_positive) static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } -#define atomic64_dec_unless_positive atomic64_dec_unless_positive -#endif -#if defined(arch_atomic64_dec_if_positive) static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } -#define atomic64_dec_if_positive atomic64_dec_if_positive -#endif -#if !defined(arch_xchg_relaxed) || defined(arch_xchg) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_acquire) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_release) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_relaxed) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_acquire) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_release) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_relaxed) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_acquire) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_release) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_relaxed) #define cmpxchg64_relaxed(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_try_cmpxchg_relaxed) || defined(arch_try_cmpxchg) #define try_cmpxchg(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1758,9 +1269,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_acquire) #define try_cmpxchg_acquire(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1769,9 +1278,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_release) #define try_cmpxchg_release(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1780,9 +1287,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_relaxed) #define try_cmpxchg_relaxed(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1791,7 +1296,6 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif #define cmpxchg_local(ptr, ...) \ ({ \ @@ -1830,4 +1334,4 @@ atomic64_dec_if_positive(atomic64_t *v) }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 4bec382e44520f4d8267e42620054db26a659ea3 +// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 11f96f40f4..04b8be9f1a 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Generic C implementation of atomic counter operations. Usable on - * UP systems only. Do not include in machine independent code. + * Generic C implementation of atomic counter operations. Do not include in + * machine independent code. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -12,56 +12,39 @@ #include #include -/* - * atomic_$op() - $op integer to atomic variable - * @i: integer value to $op - * @v: pointer to the atomic variable - * - * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use - * smp_mb__{before,after}_atomic(). - */ - -/* - * atomic_$op_return() - $op interer to atomic variable and returns the result - * @i: integer value to $op - * @v: pointer to the atomic variable - * - * Atomically $ops @i to @v. Does imply a full memory barrier. 
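The instrumented xchg()/cmpxchg() wrapper macros above copy ptr into the local __ai_ptr before using it, so an argument expression with side effects is evaluated exactly once. Without that statement-expression pattern, a call like the sketch below would advance the index twice (reap_slot() is illustrative only):

	static void *reap_slot(void **slots, int *i)
	{
		/* the wrapper evaluates &slots[(*i)++] once, then reuses it */
		return xchg(&slots[(*i)++], NULL);
	}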
- */ - #ifdef CONFIG_SMP /* we can build all atomic primitives from cmpxchg */ #define ATOMIC_OP(op, c_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void generic_atomic_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ } #define ATOMIC_OP_RETURN(op, c_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c c_op i; \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c; \ @@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ #include #define ATOMIC_OP(op, c_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void generic_atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ @@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ } #define ATOMIC_OP_RETURN(op, c_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -110,87 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ #endif /* CONFIG_SMP */ -#ifndef atomic_add_return ATOMIC_OP_RETURN(add, +) -#endif - -#ifndef atomic_sub_return ATOMIC_OP_RETURN(sub, -) -#endif -#ifndef atomic_fetch_add ATOMIC_FETCH_OP(add, +) -#endif - -#ifndef atomic_fetch_sub ATOMIC_FETCH_OP(sub, -) -#endif - -#ifndef atomic_fetch_and ATOMIC_FETCH_OP(and, &) -#endif - -#ifndef atomic_fetch_or ATOMIC_FETCH_OP(or, |) -#endif - -#ifndef atomic_fetch_xor ATOMIC_FETCH_OP(xor, ^) -#endif -#ifndef atomic_and +ATOMIC_OP(add, +) +ATOMIC_OP(sub, -) ATOMIC_OP(and, &) -#endif - -#ifndef atomic_or ATOMIC_OP(or, |) -#endif - -#ifndef atomic_xor ATOMIC_OP(xor, ^) -#endif #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. - */ +#define arch_atomic_add_return generic_atomic_add_return +#define arch_atomic_sub_return generic_atomic_sub_return -/** - * atomic_read - read atomic variable - * @v: pointer of type atomic_t - * - * Atomically reads the value of @v. - */ -#ifndef atomic_read -#define atomic_read(v) READ_ONCE((v)->counter) -#endif +#define arch_atomic_fetch_add generic_atomic_fetch_add +#define arch_atomic_fetch_sub generic_atomic_fetch_sub +#define arch_atomic_fetch_and generic_atomic_fetch_and +#define arch_atomic_fetch_or generic_atomic_fetch_or +#define arch_atomic_fetch_xor generic_atomic_fetch_xor -/** - * atomic_set - set atomic variable - * @v: pointer of type atomic_t - * @i: required value - * - * Atomically sets the value of @v to @i. 
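On SMP, the ATOMIC_OP() template above builds every operation from arch_cmpxchg() alone; expanding ATOMIC_OP(add, +) by hand gives roughly:

	static inline void generic_atomic_add(int i, atomic_t *v)
	{
		int c, old;

		c = v->counter;
		/* retry until no other CPU changed the counter under us */
		while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
			c = old;
	}

The UP variant trades the retry loop for irq_save/irq_restore, since disabling interrupts is enough to make the read-modify-write atomic on a single CPU.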
- */ -#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) +#define arch_atomic_add generic_atomic_add +#define arch_atomic_sub generic_atomic_sub +#define arch_atomic_and generic_atomic_and +#define arch_atomic_or generic_atomic_or +#define arch_atomic_xor generic_atomic_xor -#include +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) -static inline void atomic_add(int i, atomic_t *v) -{ - atomic_add_return(i, v); -} - -static inline void atomic_sub(int i, atomic_t *v) -{ - atomic_sub_return(i, v); -} - -#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) +#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 370f01d445..100d24b02e 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -15,19 +15,17 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } -extern s64 atomic64_read(const atomic64_t *v); -extern void atomic64_set(atomic64_t *v, s64 i); - -#define atomic64_set_release(v, i) atomic64_set((v), (i)) +extern s64 generic_atomic64_read(const atomic64_t *v); +extern void generic_atomic64_set(atomic64_t *v, s64 i); #define ATOMIC64_OP(op) \ -extern void atomic64_##op(s64 a, atomic64_t *v); +extern void generic_atomic64_##op(s64 a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ -extern s64 atomic64_##op##_return(s64 a, atomic64_t *v); +extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v); #define ATOMIC64_FETCH_OP(op) \ -extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v); +extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) @@ -46,11 +44,32 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -extern s64 atomic64_dec_if_positive(atomic64_t *v); -#define atomic64_dec_if_positive atomic64_dec_if_positive -extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); -extern s64 atomic64_xchg(atomic64_t *v, s64 new); -extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); -#define atomic64_fetch_add_unless atomic64_fetch_add_unless +extern s64 generic_atomic64_dec_if_positive(atomic64_t *v); +extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); +extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new); +extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); + +#define arch_atomic64_read generic_atomic64_read +#define arch_atomic64_set generic_atomic64_set +#define arch_atomic64_set_release generic_atomic64_set + +#define arch_atomic64_add generic_atomic64_add +#define arch_atomic64_add_return generic_atomic64_add_return +#define arch_atomic64_fetch_add generic_atomic64_fetch_add +#define arch_atomic64_sub generic_atomic64_sub +#define arch_atomic64_sub_return generic_atomic64_sub_return +#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub + +#define arch_atomic64_and generic_atomic64_and +#define arch_atomic64_fetch_and generic_atomic64_fetch_and +#define arch_atomic64_or generic_atomic64_or +#define arch_atomic64_fetch_or generic_atomic64_fetch_or +#define arch_atomic64_xor generic_atomic64_xor +#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor + +#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive 
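The generic_atomic64_*() externs above are implemented out of line, in lib/atomic64.c, with spinlocks, since the architectures using this header have no native 64-bit atomics. A simplified sketch, assuming the real hashed lock array is reduced to a single lock:

	static DEFINE_RAW_SPINLOCK(a64_lock);	/* lib/atomic64.c hashes v to a lock array */

	s64 generic_atomic64_read(const atomic64_t *v)
	{
		unsigned long flags;
		s64 val;

		raw_spin_lock_irqsave(&a64_lock, flags);
		val = v->counter;
		raw_spin_unlock_irqrestore(&a64_lock, flags);
		return val;
	}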
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg +#define arch_atomic64_xchg generic_atomic64_xchg +#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless #endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index b402494883..edb0e2a602 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -4,6 +4,7 @@ #include #include +#include #define CUT_HERE "------------[ cut here ]------------\n" @@ -17,7 +18,8 @@ #endif #ifndef __ASSEMBLY__ -#include +#include +#include #ifdef CONFIG_BUG @@ -140,39 +142,15 @@ void __warn(const char *file, int line, void *caller, unsigned taint, }) #ifndef WARN_ON_ONCE -#define WARN_ON_ONCE(condition) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - WARN_ON(1); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define WARN_ON_ONCE(condition) \ + DO_ONCE_LITE_IF(condition, WARN_ON, 1) #endif -#define WARN_ONCE(condition, format...) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - WARN(1, format); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define WARN_ONCE(condition, format...) \ + DO_ONCE_LITE_IF(condition, WARN, 1, format) -#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - WARN_TAINT(1, taint, format); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define WARN_TAINT_ONCE(condition, taint, format...) \ + DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format) #else /* !CONFIG_BUG */ #ifndef HAVE_ARCH_BUG diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h index 9a24510cd8..dca4419922 100644 --- a/include/asm-generic/cmpxchg.h +++ b/include/asm-generic/cmpxchg.h @@ -14,16 +14,14 @@ #include #include -#ifndef xchg - /* * This function doesn't exist, so you'll get a linker error if * something tries to do an invalidly-sized xchg(). */ -extern void __xchg_called_with_bad_pointer(void); +extern void __generic_xchg_called_with_bad_pointer(void); static inline -unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size) { unsigned long ret, flags; @@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) #endif /* CONFIG_64BIT */ default: - __xchg_called_with_bad_pointer(); + __generic_xchg_called_with_bad_pointer(); return x; } } -#define xchg(ptr, x) ({ \ - ((__typeof__(*(ptr))) \ - __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ +#define generic_xchg(ptr, x) ({ \ + ((__typeof__(*(ptr))) \ + __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ }) -#endif /* xchg */ - /* * Atomic compare and exchange. 
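The WARN_ON_ONCE()/WARN_ONCE()/WARN_TAINT_ONCE() rework above folds three copies of the same once-only pattern into DO_ONCE_LITE_IF(). Judging from the removed open-coded versions, its shape is roughly the following (a sketch; see include/linux/once_lite.h for the authoritative definition):

	#define DO_ONCE_LITE_IF(condition, func, ...)				\
		({								\
			static bool __section(".data.once") __already_done;	\
			bool __ret_cond = !!(condition);			\
										\
			if (unlikely(__ret_cond && !__already_done)) {		\
				__already_done = true;				\
				func(__VA_ARGS__);				\
			}							\
			unlikely(__ret_cond);					\
		})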
*/ #include -#ifndef cmpxchg_local -#define cmpxchg_local(ptr, o, n) ({ \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))); \ +#define generic_cmpxchg_local(ptr, o, n) ({ \ + ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))); \ }) + +#define generic_cmpxchg64_local(ptr, o, n) \ + __generic_cmpxchg64_local((ptr), (o), (n)) + + +#ifndef arch_xchg +#define arch_xchg generic_xchg #endif -#ifndef cmpxchg64_local -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#ifndef arch_cmpxchg_local +#define arch_cmpxchg_local generic_cmpxchg_local #endif -#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) -#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) +#ifndef arch_cmpxchg64_local +#define arch_cmpxchg64_local generic_cmpxchg64_local +#endif + +#define arch_cmpxchg arch_cmpxchg_local +#define arch_cmpxchg64 arch_cmpxchg64_local #endif /* __ASM_GENERIC_CMPXCHG_H */ diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index 515c3fb06a..56348a541c 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -194,6 +194,7 @@ enum HV_GENERIC_SET_FORMAT { #define HV_STATUS_INVALID_HYPERCALL_INPUT 3 #define HV_STATUS_INVALID_ALIGNMENT 4 #define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_ACCESS_DENIED 6 #define HV_STATUS_OPERATION_DENIED 8 #define HV_STATUS_INSUFFICIENT_MEMORY 11 #define HV_STATUS_INVALID_PORT_ID 17 diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h new file mode 100644 index 0000000000..a53116b8c5 --- /dev/null +++ b/include/asm-generic/logic_io.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: johannes@sipsolutions.net + */ +#ifndef _LOGIC_IO_H +#define _LOGIC_IO_H +#include + +/* include this file into asm/io.h */ + +#ifdef CONFIG_INDIRECT_IOMEM + +#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK +/* + * If you want emulated IO memory to fall back to 'normal' IO memory + * if a region wasn't registered as emulated, then you need to have + * all of the real_* functions implemented. + */ +#if !defined(real_ioremap) || !defined(real_iounmap) || \ + !defined(real_raw_readb) || !defined(real_raw_writeb) || \ + !defined(real_raw_readw) || !defined(real_raw_writew) || \ + !defined(real_raw_readl) || !defined(real_raw_writel) || \ + (defined(CONFIG_64BIT) && \ + (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \ + !defined(real_memset_io) || \ + !defined(real_memcpy_fromio) || \ + !defined(real_memcpy_toio) +#error "Must provide fallbacks for real IO memory access" +#endif /* defined ... 
*/ +#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */ + +#define ioremap ioremap +void __iomem *ioremap(phys_addr_t offset, size_t size); + +#define iounmap iounmap +void iounmap(void __iomem *addr); + +#define __raw_readb __raw_readb +u8 __raw_readb(const volatile void __iomem *addr); + +#define __raw_readw __raw_readw +u16 __raw_readw(const volatile void __iomem *addr); + +#define __raw_readl __raw_readl +u32 __raw_readl(const volatile void __iomem *addr); + +#ifdef CONFIG_64BIT +#define __raw_readq __raw_readq +u64 __raw_readq(const volatile void __iomem *addr); +#endif /* CONFIG_64BIT */ + +#define __raw_writeb __raw_writeb +void __raw_writeb(u8 value, volatile void __iomem *addr); + +#define __raw_writew __raw_writew +void __raw_writew(u16 value, volatile void __iomem *addr); + +#define __raw_writel __raw_writel +void __raw_writel(u32 value, volatile void __iomem *addr); + +#ifdef CONFIG_64BIT +#define __raw_writeq __raw_writeq +void __raw_writeq(u64 value, volatile void __iomem *addr); +#endif /* CONFIG_64BIT */ + +#define memset_io memset_io +void memset_io(volatile void __iomem *addr, int value, size_t size); + +#define memcpy_fromio memcpy_fromio +void memcpy_fromio(void *buffer, const volatile void __iomem *addr, + size_t size); + +#define memcpy_toio memcpy_toio +void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size); + +#endif /* CONFIG_INDIRECT_IOMEM */ +#endif /* _LOGIC_IO_H */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 7637fb46ba..a2c8ed6023 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -6,47 +6,18 @@ #ifndef __ASSEMBLY__ +/* + * supports 3 memory models. + */ #if defined(CONFIG_FLATMEM) #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (0UL) #endif -#elif defined(CONFIG_DISCONTIGMEM) - -#ifndef arch_pfn_to_nid -#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) -#endif - -#ifndef arch_local_page_offset -#define arch_local_page_offset(pfn, nid) \ - ((pfn) - NODE_DATA(nid)->node_start_pfn) -#endif - -#endif /* CONFIG_DISCONTIGMEM */ - -/* - * supports 3 memory models. 
- */ -#if defined(CONFIG_FLATMEM) - #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ ARCH_PFN_OFFSET) -#elif defined(CONFIG_DISCONTIGMEM) - -#define __pfn_to_page(pfn) \ -({ unsigned long __pfn = (pfn); \ - unsigned long __nid = arch_pfn_to_nid(__pfn); \ - NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ -}) - -#define __page_to_pfn(pg) \ -({ const struct page *__pg = (pg); \ - struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ - (unsigned long)(__pg - __pgdat->node_mem_map) + \ - __pgdat->node_start_pfn; \ -}) #elif defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -70,7 +41,7 @@ struct mem_section *__sec = __pfn_to_section(__pfn); \ __section_mem_map_addr(__sec) + __pfn; \ }) -#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ +#endif /* CONFIG_FLATMEM/SPARSEMEM */ /* * Convert a physical address to a Page Frame Number and back diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h index ce2cbb3c38..03b7dae47d 100644 --- a/include/asm-generic/pgtable-nop4d.h +++ b/include/asm-generic/pgtable-nop4d.h @@ -9,7 +9,6 @@ typedef struct { pgd_t pgd; } p4d_t; #define P4D_SHIFT PGDIR_SHIFT -#define MAX_PTRS_PER_P4D 1 #define PTRS_PER_P4D 1 #define P4D_SIZE (1UL << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE-1)) @@ -42,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) #define __p4d(x) ((p4d_t) { __pgd(x) }) #define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) -#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) +#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd }))) /* * allocating and freeing a p4d is trivial: the 1-entry p4d is diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 3e13acd019..10789cf51d 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -51,7 +51,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) #define __pmd(x) ((pmd_t) { __pud(x) } ) #define pud_page(pud) (pmd_page((pmd_t){ pud })) -#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) +#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud }))) /* * allocating and freeing a pmd is trivial: the 1-entry pmd is diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index a9d751fbda..eb70c6d7ce 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -49,7 +49,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) #define __pud(x) ((pud_t) { __p4d(x) }) #define p4d_page(p4d) (pud_page((pud_t){ p4d })) -#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) +#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d }))) /* * allocating and freeing a pud is trivial: the 1-entry pud is diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index d683f5e6d7..b4d43a4af5 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc) } while (0) #define init_idle_preempt_count(p, cpu) do { \ - task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ } while (0) static __always_inline void set_preempt_need_resched(void) diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 5aa8705df8..4dbe715be6 100644 --- a/include/asm-generic/topology.h +++ 
b/include/asm-generic/topology.h @@ -45,7 +45,7 @@ #endif #ifndef cpumask_of_node - #ifdef CONFIG_NEED_MULTIPLE_NODES + #ifdef CONFIG_NUMA #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) #else #define cpumask_of_node(node) ((void)(node), cpu_online_mask) diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 4973328f3c..a0b2f270dd 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -19,7 +19,7 @@ __get_user_fn(size_t size, const void __user *from, void *to) switch (size) { case 1: - *(u8 *)to = get_unaligned((u8 __force *)from); + *(u8 *)to = *((u8 __force *)from); return 0; case 2: *(u16 *)to = get_unaligned((u16 __force *)from); @@ -45,7 +45,7 @@ __put_user_fn(size_t size, void __user *to, void *from) switch (size) { case 1: - put_unaligned(*(u8 *)from, (u8 __force *)to); + *(u8 __force *)to = *(u8 *)from; return 0; case 2: put_unaligned(*(u16 *)from, (u16 __force *)to); diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index 374c940e9b..1c4242416c 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -6,31 +6,124 @@ * This is the most generic implementation of unaligned accesses * and should work almost anywhere. */ +#include #include -/* Set by the arch if it can handle unaligned accesses in hardware. */ -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -# include -#endif +#define __get_unaligned_t(type, ptr) ({ \ + const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ + __pptr->x; \ +}) -#if defined(__LITTLE_ENDIAN) -# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -# include -# include -# endif -# include -# define get_unaligned __get_unaligned_le -# define put_unaligned __put_unaligned_le -#elif defined(__BIG_ENDIAN) -# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -# include -# include -# endif -# include -# define get_unaligned __get_unaligned_be -# define put_unaligned __put_unaligned_be -#else -# error need to define endianess -#endif +#define __put_unaligned_t(type, val, ptr) do { \ + struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ + __pptr->x = (val); \ +} while (0) + +#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) +#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr)) + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpu(__get_unaligned_t(__le16, p)); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpu(__get_unaligned_t(__le32, p)); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpu(__get_unaligned_t(__le64, p)); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_t(__le16, cpu_to_le16(val), p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_t(__le32, cpu_to_le32(val), p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_t(__le64, cpu_to_le64(val), p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpu(__get_unaligned_t(__be16, p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpu(__get_unaligned_t(__be32, p)); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpu(__get_unaligned_t(__be64, p)); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_t(__be16, cpu_to_be16(val), p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + 
__put_unaligned_t(__be32, cpu_to_be32(val), p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_t(__be64, cpu_to_be64(val), p); +} + +static inline u32 __get_unaligned_be24(const u8 *p) +{ + return p[0] << 16 | p[1] << 8 | p[2]; +} + +static inline u32 get_unaligned_be24(const void *p) +{ + return __get_unaligned_be24(p); +} + +static inline u32 __get_unaligned_le24(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16; +} + +static inline u32 get_unaligned_le24(const void *p) +{ + return __get_unaligned_le24(p); +} + +static inline void __put_unaligned_be24(const u32 val, u8 *p) +{ + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline void put_unaligned_be24(const u32 val, void *p) +{ + __put_unaligned_be24(val, p); +} + +static inline void __put_unaligned_le24(const u32 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; + *p++ = val >> 16; +} + +static inline void put_unaligned_le24(const u32 val, void *p) +{ + __put_unaligned_le24(val, p); +} #endif /* __ASM_GENERIC_UNALIGNED_H */ diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h index c395238d09..9b435caa95 100644 --- a/include/clocksource/samsung_pwm.h +++ b/include/clocksource/samsung_pwm.h @@ -27,6 +27,7 @@ struct samsung_pwm_variant { }; void samsung_pwm_clocksource_init(void __iomem *base, - unsigned int *irqs, struct samsung_pwm_variant *variant); + unsigned int *irqs, + const struct samsung_pwm_variant *variant); #endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */ diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index 4c61dade88..f6da8a1326 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -74,6 +74,7 @@ #define OMAP_TIMER_ERRATA_I103_I767 0x80000000 struct timer_regs { + u32 ocp_cfg; u32 tidr; u32 tier; u32 twer; diff --git a/include/crypto/aead.h b/include/crypto/aead.h index e728469c4c..5af914c1ab 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -490,7 +490,7 @@ static inline void aead_request_set_callback(struct aead_request *req, * The memory structure for cipher operation has the following structure: * * - AEAD encryption input: assoc data || plaintext - * - AEAD encryption output: assoc data || cipherntext || auth tag + * - AEAD encryption output: assoc data || ciphertext || auth tag * - AEAD decryption input: assoc data || ciphertext || auth tag * - AEAD decryption output: assoc data || plaintext * diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 86f0748009..5f6841c73e 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -96,6 +96,15 @@ struct scatter_walk { unsigned int offset; }; +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + void crypto_mod_put(struct crypto_alg *alg); int crypto_register_template(struct crypto_template *tmpl); @@ -118,7 +127,6 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); -int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, struct crypto_alg *alg); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 3f06e40d06..26cac19b0f 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -28,7 +28,7 @@ * of a failed backlog request * 
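The asm-generic/unaligned.h rewrite above replaces the per-endianness header maze with a single trick: casting through a __packed single-member struct tells the compiler the access may be misaligned, so it emits byte-wise loads only on architectures that need them. Usage sketch against a hypothetical wire format (field layout is assumed for illustration):

	/* Hypothetical header: a type byte followed by a 24-bit BE length,
	 * so every later field is misaligned. */
	static u32 hdr_len(const u8 *buf)
	{
		return get_unaligned_be24(buf + 1);
	}

	static u64 hdr_id(const u8 *buf)
	{
		return get_unaligned_be64(buf + 4);
	}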
crypto-engine, in head position to keep order * @list: link with the global crypto engine list - * @queue_lock: spinlock to syncronise access to request queue + * @queue_lock: spinlock to synchronise access to request queue * @queue: the crypto queue of the engine * @rt: whether this queue is set to run as a realtime task * @prepare_crypt_hardware: a request will soon arrive from the queue diff --git a/include/crypto/hash.h b/include/crypto/hash.h index b2bc1e46e8..f140e46439 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -458,7 +458,7 @@ int crypto_ahash_finup(struct ahash_request *req); * * Return: * 0 if the message digest was successfully calculated; - * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later; + * -EINPROGRESS if data is fed into hardware (DMA) or queued for later; * -EBUSY if queue is full and request should be resubmitted later; * other < 0 if an error occurred */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 0a288dddcf..25806141db 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); -int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen); - -static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) -{ - return alg->setkey != shash_no_setkey; -} +bool crypto_shash_alg_has_setkey(struct shash_alg *alg); static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) { diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index c837d07754..7af08174a7 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -81,12 +81,7 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, struct page *page; page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as - * PageSlab cannot be optimised away per se due to - * use of volatile pointer. 
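The scatterwalk_pagedone() hunk starting here drops the PageSlab special case; the apparent reasoning is that on architectures without D-cache aliasing flush_dcache_page() compiles away anyway, while on the rest it must run regardless of page type. Sketch of the no-op default (an assumption about the asm-generic fallback, not part of this patch):

	static inline void flush_dcache_page(struct page *page)
	{
		/* no D-cache aliasing on this architecture: nothing to flush */
	}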
- */ - if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) - flush_dcache_page(page); + flush_dcache_page(page); } if (more && walk->offset >= walk->sg->offset + walk->sg->length) diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 336e365069..0bf0ad869e 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -59,6 +59,8 @@ enum amd_asic_type { CHIP_NAVY_FLOUNDER, /* 30 */ CHIP_VANGOGH, /* 31 */ CHIP_DIMGREY_CAVEFISH, /* 32 */ + CHIP_BEIGE_GOBY, /* 33 */ + CHIP_YELLOW_CARP, /* 34 */ CHIP_LAST, }; diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index ea34ca146b..6a57166556 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -153,6 +153,8 @@ struct dw_hdmi_plat_data { const struct dw_hdmi_phy_config *phy_config; int (*configure_phy)(struct dw_hdmi *hdmi, void *data, unsigned long mpixelclock); + + unsigned int disable_cec : 1; }; struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h new file mode 100644 index 0000000000..6c14807878 --- /dev/null +++ b/include/drm/drm_aperture.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef _DRM_APERTURE_H_ +#define _DRM_APERTURE_H_ + +#include + +struct drm_device; +struct pci_dev; + +int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, + resource_size_t size); + +int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, + bool primary, const char *name); + +int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name); + +/** + * drm_aperture_remove_framebuffers - remove all existing framebuffers + * @primary: also kick vga16fb if present + * @name: requesting driver name + * + * This function removes all graphics device drivers. Use this function on systems + * that can have their framebuffer located anywhere in memory. 
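A typical caller of the drm_aperture helpers declared above is a native PCI DRM driver evicting generic framebuffer drivers (efifb, vesafb, ...) during probe, before it takes ownership of the device. Hedged sketch with hypothetical mydrv names:

	static int mydrv_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
	{
		int ret;

		/* kick firmware framebuffers off our apertures first */
		ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "mydrv");
		if (ret)
			return ret;

		/* ... allocate and register the drm_device ... */
		return 0;
	}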
+ * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +static inline int drm_aperture_remove_framebuffers(bool primary, const char *name) +{ + return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary, name); +} + +#endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index ac5a28eff2..1701c2128a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -895,6 +895,22 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); (old_plane_state) = (__state)->planes[__i].old_state,\ (new_plane_state) = (__state)->planes[__i].new_state, 1)) +/** + * for_each_new_plane_in_state_reverse - other than only tracking new state, + * it's the same as for_each_oldnew_plane_in_state_reverse + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + */ +#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \ + for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ + (__i) >= 0; \ + (__i)--) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + /** * for_each_old_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index e9ad4863d9..cc9de1632d 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -35,6 +35,8 @@ #include +struct dma_buf_map; + void drm_clflush_pages(struct page *pages[], unsigned long num_pages); void drm_clflush_sg(struct sg_table *st); void drm_clflush_virt_range(void *addr, unsigned long length); @@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void) #endif } +void drm_memcpy_init_early(void); + +void drm_memcpy_from_wc(struct dma_buf_map *dst, + const struct dma_buf_map *src, + unsigned long len); #endif diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index d647223e83..f588f967bb 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -276,12 +276,6 @@ struct drm_device { */ spinlock_t event_lock; - /** @agp: AGP data */ - struct drm_agp_head *agp; - - /** @pdev: PCI device structure */ - struct pci_dev *pdev; - /** @num_crtcs: Number of CRTCs on this device */ unsigned int num_crtcs; @@ -329,6 +323,9 @@ struct drm_device { struct pci_controller *hose; #endif + /* AGP data */ + struct drm_agp_head *agp; + /* Context handle management - linked list of context handles */ struct list_head ctxlist; diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h index 4c42db81fc..7ee4822650 100644 --- a/include/drm/drm_dp_dual_mode_helper.h +++ b/include/drm/drm_dp_dual_mode_helper.h @@ -62,6 +62,7 @@ #define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 #define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 +struct drm_device; struct i2c_adapter; ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, @@ -103,17 +104,18 @@ enum drm_dp_dual_mode_type { DRM_DP_DUAL_MODE_LSPCON, }; -enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); -int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, +enum drm_dp_dual_mode_type +drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum 
drm_dp_dual_mode_type type, struct i2c_adapter *adapter); -int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, +int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter, bool *enabled); -int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, +int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter, bool enable); const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); -int drm_lspcon_get_mode(struct i2c_adapter *adapter, +int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter, enum drm_lspcon_mode *current_mode); -int drm_lspcon_set_mode(struct i2c_adapter *adapter, +int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter, enum drm_lspcon_mode reqd_mode); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 1e85c2021f..3f2715eb96 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -29,6 +29,7 @@ #include struct drm_device; +struct drm_dp_aux; /* * Unless otherwise noted, all values are from the DP 1.1a spec. Note that @@ -687,14 +688,14 @@ struct drm_device; #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ # define DP_DECOMPRESSION_EN (1 << 0) -#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ -# define DP_PSR_ENABLE (1 << 0) -# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) -# define DP_PSR_CRC_VERIFICATION (1 << 2) -# define DP_PSR_FRAME_CAPTURE (1 << 3) -# define DP_PSR_SELECTIVE_UPDATE (1 << 4) -# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) -# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ +# define DP_PSR_ENABLE BIT(0) +# define DP_PSR_MAIN_LINK_ACTIVE BIT(1) +# define DP_PSR_CRC_VERIFICATION BIT(2) +# define DP_PSR_FRAME_CAPTURE BIT(3) +# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */ +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */ +# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */ #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) @@ -1376,10 +1377,27 @@ enum drm_dp_phy { #define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ #define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ #define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ + +#define __DP_FEC1_BASE 0xf0290 /* 1.4 */ +#define __DP_FEC2_BASE 0xf0298 /* 1.4 */ +#define DP_FEC_BASE(dp_phy) \ + (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \ + ((dp_phy) - DP_PHY_LTTPR1))) + +#define DP_FEC_REG(dp_phy, fec1_reg) \ + (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg) + #define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ +#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \ + DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1) + #define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ #define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ +#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */ + +#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */ + /* Repeater modes */ #define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */ #define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */ @@ -1482,10 +1500,13 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ #define DP_LTTPR_COMMON_CAP_SIZE 8 #define DP_LTTPR_PHY_CAP_SIZE 3 -void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + 
const u8 dpcd[DP_RECEIVER_CAP_SIZE]); void drm_dp_lttpr_link_train_clock_recovery_delay(void); -void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_lttpr_link_train_channel_eq_delay(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); u8 drm_dp_link_rate_to_bw_code(int link_rate); int drm_dp_bw_code_to_link_rate(u8 link_bw); @@ -1840,6 +1861,8 @@ struct drm_dp_aux_cec { * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter * @ddc: I2C adapter that can be used for I2C-over-AUX communication * @dev: pointer to struct device that is the parent for this AUX channel + * @drm_dev: pointer to the &drm_device that owns this AUX channel. Beware, this + * may be %NULL before drm_dp_aux_register() has been called. * @crtc: backpointer to the crtc that is currently using this AUX channel * @hw_mutex: internal mutex used for locking transfers * @crc_work: worker that captures CRCs for each frame @@ -1847,7 +1870,11 @@ struct drm_dp_aux_cec { * @transfer: transfers a message representing a single AUX transaction * * The @dev field should be set to a pointer to the device that implements the - * AUX channel. + * AUX channel. As well, the @drm_dev field should be set to the &drm_device + * that will be using this AUX channel as early as possible. For many graphics + * drivers this should happen before drm_dp_aux_init(), however it's perfectly + * fine to set this field later so long as it's assigned before calling + * drm_dp_aux_register(). * * The @name field may be used to specify the name of the I2C adapter. If set to * %NULL, dev_name() of @dev will be used. @@ -1879,6 +1906,7 @@ struct drm_dp_aux { const char *name; struct i2c_adapter ddc; struct device *dev; + struct drm_device *drm_dev; struct drm_crtc *crtc; struct mutex hw_mutex; struct work_struct crc_work; diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index bd1c39907b..ddb9231d03 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -593,6 +593,14 @@ struct drm_dp_mst_topology_mgr { * @max_payloads: maximum number of payloads the GPU can generate. */ int max_payloads; + /** + * @max_lane_count: maximum number of lanes the GPU can drive. + */ + int max_lane_count; + /** + * @max_link_rate: maximum link rate per lane GPU can output, in kHz. + */ + int max_link_rate; /** * @conn_base_id: DRM connector ID this mgr is connected to. Only used * to build the MST connector path value. 
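In practice the @drm_dev requirement documented above amounts to one extra assignment when a driver sets up its AUX channel. A minimal, hypothetical sketch (the foo_* names and fields are assumptions, not part of this patch):

static int foo_dp_aux_setup(struct foo_device *foo)
{
	struct drm_dp_aux *aux = &foo->aux;

	aux->name = "foo DP AUX";
	aux->dev = foo->dev;			/* device implementing the channel */
	aux->drm_dev = &foo->drm;		/* must be assigned before registering */
	aux->transfer = foo_dp_aux_transfer;

	return drm_dp_aux_register(aux);
}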
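The DP_FEC_BASE()/DP_FEC_REG() helpers added earlier in this file encode an 8-byte register stride per LTTPR. A quick sanity check of that arithmetic (illustrative only, not part of the patch):

/* LTTPR2 is one stride past LTTPR1, so its FEC status register must
 * coincide with __DP_FEC2_BASE (0xf0290 + 8 * 1 == 0xf0298). */
static_assert(DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) == 0xf0298);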
@@ -765,7 +773,9 @@ struct drm_dp_mst_topology_mgr { int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct drm_device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, - int max_payloads, int conn_base_id); + int max_payloads, + int max_lane_count, int max_link_rate, + int conn_base_id); void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); @@ -783,7 +793,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); +int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 795aea1d0a..6447e34528 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -4,6 +4,7 @@ #include +struct drm_device; struct drm_framebuffer; struct drm_plane_state; @@ -14,5 +15,9 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, struct drm_plane_state *state, unsigned int plane); +void drm_fb_cma_sync_non_coherent(struct drm_device *drm, + struct drm_plane_state *old_state, + struct drm_plane_state *state); + #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 3b273f9ca3..3af4624368 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -36,7 +36,6 @@ struct drm_fb_helper; #include #include #include -#include enum mode_set_atomic { LEAVE_ATOMIC_MODE_SET, @@ -451,54 +450,4 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) #endif -/** - * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers - * @a: memory range, users of which are to be removed - * @name: requesting driver name - * @primary: also kick vga16fb if present - * - * This function removes framebuffer devices (initialized by firmware/bootloader) - * which use memory range described by @a. If @a is NULL all such devices are - * removed. - */ -static inline int -drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, - const char *name, bool primary) -{ -#if IS_REACHABLE(CONFIG_FB) - return remove_conflicting_framebuffers(a, name, primary); -#else - return 0; -#endif -} - -/** - * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices - * @pdev: PCI device - * @name: requesting driver name - * - * This function removes framebuffer devices (eg. initialized by firmware) - * using memory range configured for any of @pdev's memory bars. - * - * The function assumes that PCI device with shadowed ROM drives a primary - * display and so kicks out vga16fb. - */ -static inline int -drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const char *name) -{ - int ret = 0; - - /* - * WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. 
- */ -#if IS_REACHABLE(CONFIG_FB) - ret = remove_conflicting_pci_framebuffers(pdev, name); -#endif - if (ret == 0) - ret = vga_remove_vgacon(pdev); - return ret; -} - #endif diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 5f9e370324..4e0258a613 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -11,7 +11,7 @@ struct drm_rect; void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); -void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr, +void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, @@ -28,4 +28,12 @@ void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); +int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, + uint32_t dst_format, void *vmap, + struct drm_framebuffer *fb, + struct drm_rect *rect); +int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch, + uint32_t dst_format, void *vmap, + struct drm_framebuffer *fb); + #endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 156b122c0a..3b138d4ae6 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -135,14 +135,6 @@ struct drm_format_info { bool is_yuv; }; -/** - * struct drm_format_name_buf - name of a DRM format - * @str: string buffer containing the format name - */ -struct drm_format_name_buf { - char str[32]; -}; - /** * drm_format_info_is_yuv_packed - check that the format info matches a YUV * format with data laid in a single plane @@ -318,6 +310,5 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info, int plane); uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, int plane, unsigned int buffer_width); -const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf); #endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 0a9711caa3..cd13508acb 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -16,6 +16,7 @@ struct drm_mode_create_dumb; * more than one entry but they are guaranteed to have contiguous * DMA addresses. 
* @vaddr: kernel virtual address of the backing memory + * @map_noncoherent: if true, the GEM object is backed by non-coherent memory */ struct drm_gem_cma_object { struct drm_gem_object base; @@ -24,6 +25,8 @@ struct drm_gem_cma_object { /* For objects with DMA memory allocated by GEM CMA */ void *vaddr; + + bool map_noncoherent; }; #define to_drm_gem_cma_obj(gem_obj) \ diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index 7c6d874910..c1aa02bd4c 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -5,8 +5,8 @@ #include -#include #include +#include #include #include @@ -24,4 +24,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma); +int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset); + #endif diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 288055d397..27ed7e9243 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -93,7 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, size_t size, unsigned long pg_align); void drm_gem_vram_put(struct drm_gem_vram_object *gbo); -u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); @@ -113,9 +113,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file, int drm_gem_vram_driver_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, - struct drm_device *dev, - uint32_t handle, uint64_t *offset); /* * Helpers for struct drm_plane_helper_funcs @@ -149,7 +146,7 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( #define DRM_GEM_VRAM_DRIVER \ .debugfs_init = drm_vram_mm_debugfs_init, \ .dumb_create = drm_gem_vram_driver_dumb_create, \ - .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \ + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ .gem_prime_mmap = drm_gem_prime_mmap /* diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 8ed04e9be9..b17e79e12b 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -33,6 +33,8 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include + #include #include #include @@ -194,10 +196,6 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock); #ifdef CONFIG_PCI -struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, - size_t align); -void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah); - int drm_legacy_pci_init(const struct drm_driver *driver, struct pci_driver *pdriver); void drm_legacy_pci_exit(const struct drm_driver *driver, @@ -229,6 +227,86 @@ static inline void drm_legacy_pci_exit(const struct drm_driver *driver, #endif +/* + * AGP Support + */ + +struct drm_agp_head { + struct agp_kern_info agp_info; + struct list_head memory; + unsigned long mode; + struct agp_bridge_data *bridge; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; + int cant_use_aperture; + unsigned long page_mask; +}; + +#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) +struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); +int drm_legacy_agp_acquire(struct drm_device *dev); +int drm_legacy_agp_release(struct drm_device *dev); +int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); +int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); +int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +#else +static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) +{ + return NULL; +} + +static inline int drm_legacy_agp_acquire(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_release(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_enable(struct drm_device *dev, + struct drm_agp_mode mode) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_info(struct drm_device *dev, + struct drm_agp_info *info) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_alloc(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_free(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_unbind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_bind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} +#endif + /* drm_memory.c */ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index ab424ddd76..1ddf7783fd 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -909,6 +909,8 @@ struct drm_mode_config { * @allow_fb_modifiers: * * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. + * Note that drivers should not set this directly, it is automatically + * set in drm_universal_plane_init(). * * IMPORTANT: * diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index a3c58c941b..9b66be54dd 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -443,25 +443,25 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category, #define drm_dbg_core(drm, fmt, ...) 
\ - drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) #define drm_dbg(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define drm_dbg_kms(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) #define drm_dbg_prime(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) #define drm_dbg_atomic(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) #define drm_dbg_vbl(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) #define drm_dbg_state(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) #define drm_dbg_lease(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) #define drm_dbg_dp(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) #define drm_dbg_drmres(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) /* diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 10225a0a35..d18af49fd0 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -275,7 +275,7 @@ struct drm_sched_backend_ops { * @pending_list: the list of jobs which are currently in the job queue. * @job_list_lock: lock to protect the pending_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked - * guilty and it will be considered for scheduling further. + * guilty and it will no longer be considered for scheduling. 
* @score: score to help loadbalancer pick a idle sched * @_score: score used when the driver doesn't provide one * @ready: marks if the underlying HW is ready to work diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index ebd0dd1c35..eee18fa53b 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -640,9 +640,32 @@ INTEL_VGA_DEVICE(0x4681, info), \ INTEL_VGA_DEVICE(0x4682, info), \ INTEL_VGA_DEVICE(0x4683, info), \ + INTEL_VGA_DEVICE(0x4688, info), \ + INTEL_VGA_DEVICE(0x4689, info), \ INTEL_VGA_DEVICE(0x4690, info), \ INTEL_VGA_DEVICE(0x4691, info), \ INTEL_VGA_DEVICE(0x4692, info), \ INTEL_VGA_DEVICE(0x4693, info) +/* ADL-P */ +#define INTEL_ADLP_IDS(info) \ + INTEL_VGA_DEVICE(0x46A0, info), \ + INTEL_VGA_DEVICE(0x46A1, info), \ + INTEL_VGA_DEVICE(0x46A2, info), \ + INTEL_VGA_DEVICE(0x46A3, info), \ + INTEL_VGA_DEVICE(0x46A6, info), \ + INTEL_VGA_DEVICE(0x46A8, info), \ + INTEL_VGA_DEVICE(0x46AA, info), \ + INTEL_VGA_DEVICE(0x462A, info), \ + INTEL_VGA_DEVICE(0x4626, info), \ + INTEL_VGA_DEVICE(0x4628, info), \ + INTEL_VGA_DEVICE(0x46B0, info), \ + INTEL_VGA_DEVICE(0x46B1, info), \ + INTEL_VGA_DEVICE(0x46B2, info), \ + INTEL_VGA_DEVICE(0x46B3, info), \ + INTEL_VGA_DEVICE(0x46C0, info), \ + INTEL_VGA_DEVICE(0x46C1, info), \ + INTEL_VGA_DEVICE(0x46C2, info), \ + INTEL_VGA_DEVICE(0x46C3, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 2155e2e38a..f681bbdbc6 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -86,6 +86,7 @@ struct ttm_tt; * @base: drm_gem_object superclass data. * @bdev: Pointer to the buffer object device structure. * @type: The bo type. + * @page_alignment: Page alignment. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. * @kref: Reference count of this buffer object. When this refcount reaches @@ -123,6 +124,7 @@ struct ttm_buffer_object { struct ttm_device *bdev; enum ttm_bo_type type; + uint32_t page_alignment; void (*destroy) (struct ttm_buffer_object *); /** @@ -134,7 +136,7 @@ struct ttm_buffer_object { * Members protected by the bo::resv::reserved lock. */ - struct ttm_resource mem; + struct ttm_resource *resource; struct ttm_tt *ttm; bool deleted; @@ -522,19 +524,6 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); */ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); -/** - * ttm_bo_mmap - mmap out of the ttm device address space. - * - * @filp: filp as input from the mmap method. - * @vma: vma as input from the mmap method. - * @bdev: Pointer to the ttm_device with the address space manager. - * - * This function is intended to be called by the device mmap method. - * if the device address space is to be backed by the bo manager. - */ -int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_device *bdev); - /** * ttm_bo_io * @@ -562,25 +551,6 @@ ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp, int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, gfp_t gfp_flags); -/** - * ttm_bo_uses_embedded_gem_object - check if the given bo uses the - * embedded drm_gem_object. - * - * Most ttm drivers are using gem too, so the embedded - * ttm_buffer_object.base will be initialized by the driver (before - * calling ttm_bo_init). It is also possible to use ttm without gem - * though (vmwgfx does that). 
- * - * This helper will figure whenever a given ttm bo is a gem object too - * or not. - * - * @bo: The bo to check. - */ -static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) -{ - return bo->base.dev != NULL; -} - /** * ttm_bo_pin - Pin the buffer object. * @bo: The buffer object to pin * @@ -637,4 +607,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all); +vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot); + #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index dbccac957f..68d6069572 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -40,6 +40,7 @@ #include #include "ttm_bo_api.h" +#include "ttm_kmap_iter.h" #include "ttm_placement.h" #include "ttm_tt.h" #include "ttm_pool.h" @@ -96,7 +97,7 @@ struct ttm_lru_bulk_move { */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_resource *mem, + struct ttm_resource **mem, struct ttm_operation_ctx *ctx); /** @@ -181,15 +182,15 @@ static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) { spin_lock(&bo->bdev->lru_lock); - ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); spin_unlock(&bo->bdev->lru_lock); } static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, struct ttm_resource *new_mem) { - bo->mem = *new_mem; - new_mem->mm_node = NULL; + WARN_ON(bo->resource); + bo->resource = new_mem; } /** @@ -202,9 +203,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, struct ttm_resource *new_mem) { - struct ttm_resource *old_mem = &bo->mem; - - WARN_ON(old_mem->mm_node != NULL); + ttm_resource_free(bo, &bo->resource); ttm_bo_assign_mem(bo, new_mem); } @@ -272,6 +271,23 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bool pipeline, struct ttm_resource *new_mem); +/** + * ttm_bo_move_sync_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @new_mem: struct ttm_resource indicating where to move. + * + * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed + * by the caller to be idle. Typically used after memcpy buffer moves. + */ +static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem); + + WARN_ON(ret); +} + /** * ttm_bo_pipeline_gutting. * @@ -306,30 +322,14 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); */ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); -/** - * ttm_range_man_init - * - * @bdev: ttm device - * @type: memory manager type - * @use_tt: if the memory manager uses tt - * @p_size: size of area to be managed in pages. - * - * Initialise a generic range manager for the selected memory type. - * The range manager is installed for this device in the type slot. - */ -int ttm_range_man_init(struct ttm_device *bdev, - unsigned type, bool use_tt, - unsigned long p_size); - -/** - * ttm_range_man_fini - * - * @bdev: ttm device - * @type: memory manager type - * - * Remove the generic range manager from a slot and tear it down.
- */ -int ttm_range_man_fini(struct ttm_device *bdev, - unsigned type); +void ttm_move_memcpy(struct ttm_buffer_object *bo, + u32 num_pages, + struct ttm_kmap_iter *dst_iter, + struct ttm_kmap_iter *src_iter); +struct ttm_kmap_iter * +ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, + struct io_mapping *iomap, + struct sg_table *st, + resource_size_t start); #endif diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index a0b4a49fa4..3c9dd65f5a 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -33,4 +33,6 @@ enum ttm_caching { ttm_cached }; +pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp); + #endif diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index 7c8f87bd52..cd592f8e94 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -161,21 +161,6 @@ struct ttm_device_funcs { struct ttm_resource *new_mem, struct ttm_place *hop); - /** - * struct ttm_bo_driver_member verify_access - * - * @bo: Pointer to a buffer object. - * @filp: Pointer to a struct file trying to access the object. - * - * Called from the map / write / read methods to verify that the - * caller is permitted to access the buffer object. - * This member may be set to NULL, which will refuse this kind of - * access for all buffer objects. - * This function should return 0 if access is granted, -EPERM otherwise. - */ - int (*verify_access)(struct ttm_buffer_object *bo, - struct file *filp); - /** * Hook to notify driver about a resource delete. */ diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h new file mode 100644 index 0000000000..8bb00fd39d --- /dev/null +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef __TTM_KMAP_ITER_H__ +#define __TTM_KMAP_ITER_H__ + +#include + +struct ttm_kmap_iter; +struct dma_buf_map; + +/** + * struct ttm_kmap_iter_ops - Ops structure for a struct + * ttm_kmap_iter. + * @maps_tt: Whether the iterator maps TT memory directly, as opposed to + * mapping a TT through an aperture. Both these modes have + * struct ttm_resource_manager::use_tt set, but the latter typically + * returns is_iomem == true from ttm_mem_io_reserve. + */ +struct ttm_kmap_iter_ops { + /** + * map_local() - Map a PAGE_SIZE part of the resource using + * kmap_local semantics. + * @res_iter: Pointer to the struct ttm_kmap_iter representing + * the resource. + * @dmap: The struct dma_buf_map holding the virtual address after + * the operation. + * @i: The location within the resource to map. PAGE_SIZE granularity. + */ + void (*map_local)(struct ttm_kmap_iter *res_iter, + struct dma_buf_map *dmap, pgoff_t i); + /** + * unmap_local() - Unmap a PAGE_SIZE part of the resource previously + * mapped using kmap_local. + * @res_iter: Pointer to the struct ttm_kmap_iter representing + * the resource. + * @dmap: The struct dma_buf_map holding the virtual address after + * the operation. + */ + void (*unmap_local)(struct ttm_kmap_iter *res_iter, + struct dma_buf_map *dmap); + bool maps_tt; +}; + +/** + * struct ttm_kmap_iter - Iterator for kmap_local type operations on a + * resource. + * @ops: Pointer to the operations struct. + * + * This struct is intended to be embedded in a resource-specific specialization + * implementing operations for the resource. + * + * Nothing stops us from extending the operations to vmap, vmap_pfn etc, + * replacing some or parts of the ttm_bo_util
cpu-map functionality. + */ +struct ttm_kmap_iter { + const struct ttm_kmap_iter_ops *ops; +}; + +#endif /* __TTM_KMAP_ITER_H__ */ diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h new file mode 100644 index 0000000000..22b6fa42ac --- /dev/null +++ b/include/drm/ttm/ttm_range_manager.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _TTM_RANGE_MANAGER_H_ +#define _TTM_RANGE_MANAGER_H_ + +#include +#include + +/** + * struct ttm_range_mgr_node + * + * @base: base class we extend + * @mm_nodes: MM nodes, usually 1 + * + * Extending the ttm_resource object to manage an address space allocation with + * one or more drm_mm_nodes. + */ +struct ttm_range_mgr_node { + struct ttm_resource base; + struct drm_mm_node mm_nodes[]; +}; + +/** + * to_ttm_range_mgr_node + * + * @res: the resource to upcast + * + * Upcast the ttm_resource object into a ttm_range_mgr_node object. + */ +static inline struct ttm_range_mgr_node * +to_ttm_range_mgr_node(struct ttm_resource *res) +{ + return container_of(res, struct ttm_range_mgr_node, base); +} + +int ttm_range_man_init(struct ttm_device *bdev, + unsigned type, bool use_tt, + unsigned long p_size); +int ttm_range_man_fini(struct ttm_device *bdev, + unsigned type); + +#endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 6164ccf4f3..140b6b9a8b 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -27,9 +27,11 @@ #include #include +#include #include #include #include +#include #define TTM_MAX_BO_PRIORITY 4U @@ -38,6 +40,10 @@ struct ttm_resource_manager; struct ttm_resource; struct ttm_place; struct ttm_buffer_object; +struct dma_buf_map; +struct io_mapping; +struct sg_table; +struct scatterlist; struct ttm_resource_manager_func { /** @@ -45,46 +51,38 @@ struct ttm_resource_manager_func { * * @man: Pointer to a memory type manager. * @bo: Pointer to the buffer object we're allocating space for. - * @placement: Placement details. - * @flags: Additional placement flags. - * @mem: Pointer to a struct ttm_resource to be filled in. + * @place: Placement details. + * @res: Resulting pointer to the ttm_resource. * * This function should allocate space in the memory type managed - * by @man. Placement details if - * applicable are given by @placement. If successful, - * @mem::mm_node should be set to a non-null value, and - * @mem::start should be set to a value identifying the beginning + * by @man. Placement details if applicable are given by @place. If + * successful, a filled in ttm_resource object should be returned in + * @res. @res::start should be set to a value identifying the beginning * of the range allocated, and the function should return zero. - * If the memory region accommodate the buffer object, @mem::mm_node - * should be set to NULL, and the function should return 0. + * If the manager can't fulfill the request -ENOSPC should be returned. * If a system error occurred, preventing the request to be fulfilled, * the function should return a negative error code. * - * Note that @mem::mm_node will only be dereferenced by - * struct ttm_resource_manager functions and optionally by the driver, - * which has knowledge of the underlying type. - * - * This function may not be called from within atomic context, so - * an implementation can and must use either a mutex or a spinlock to - * protect any data structures managing the space.
+ * This function may not be called from within atomic context and needs + * to take care of its own locking to protect any data structures + * managing the space. */ int (*alloc)(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource *mem); + struct ttm_resource **res); /** * struct ttm_resource_manager_func member free * * @man: Pointer to a memory type manager. - * @mem: Pointer to a struct ttm_resource to be filled in. + * @res: Pointer to a struct ttm_resource to be freed. * - * This function frees memory type resources previously allocated - * and that are identified by @mem::mm_node and @mem::start. May not - * be called from within atomic context. + * This function frees memory type resources previously allocated. + * May not be called from within atomic context. */ void (*free)(struct ttm_resource_manager *man, - struct ttm_resource *mem); + struct ttm_resource *res); /** * struct ttm_resource_manager_func member debug @@ -158,10 +156,9 @@ struct ttm_bus_placement { /** * struct ttm_resource * - * @mm_node: Memory manager node. - * @size: Requested size of memory region. - * @num_pages: Actual size of memory region in pages. - * @page_alignment: Page alignment. + * @start: Start of the allocation. + * @num_pages: Actual size of resource in pages. + * @mem_type: Resource type of the allocation. * @placement: Placement flags. * @bus: Placement on io bus accessible to the CPU * @@ -169,15 +166,52 @@ struct ttm_bus_placement { * buffer object. */ struct ttm_resource { - void *mm_node; unsigned long start; unsigned long num_pages; - uint32_t page_alignment; uint32_t mem_type; uint32_t placement; struct ttm_bus_placement bus; }; +/** + * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping + + * struct sg_table backed struct ttm_resource. + * @base: Embedded struct ttm_kmap_iter providing the usage interface. + * @iomap: struct io_mapping representing the underlying linear io_memory. + * @st: sg_table into @iomap, representing the memory of the struct ttm_resource. + * @start: Offset that needs to be subtracted from @st to make + * sg_dma_address(st->sgl) - @start == 0 for @iomap start. + * @cache: Scatterlist traversal cache for fast lookups. + * @cache.sg: Pointer to the currently cached scatterlist segment. + * @cache.i: First index of @sg. PAGE_SIZE granularity. + * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity. + * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity. 
+ */ +struct ttm_kmap_iter_iomap { + struct ttm_kmap_iter base; + struct io_mapping *iomap; + struct sg_table *st; + resource_size_t start; + struct { + struct scatterlist *sg; + pgoff_t i; + pgoff_t end; + pgoff_t offs; + } cache; +}; + +/** + * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io + * @base: The base iterator + * @dmap: Points to the starting address of the region + * @needs_unmap: Whether we need to unmap on fini + */ +struct ttm_kmap_iter_linear_io { + struct ttm_kmap_iter base; + struct dma_buf_map dmap; + bool needs_unmap; +}; + /** * ttm_resource_manager_set_used * @@ -225,10 +259,13 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man) man->move = NULL; } +void ttm_resource_init(struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *res); int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource *res); -void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); + struct ttm_resource **res); +void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res); void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); @@ -239,4 +276,20 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); +struct ttm_kmap_iter * +ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, + struct io_mapping *iomap, + struct sg_table *st, + resource_size_t start); + +struct ttm_kmap_iter_linear_io; + +struct ttm_kmap_iter * +ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, + struct ttm_device *bdev, + struct ttm_resource *mem); + +void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, + struct ttm_device *bdev, + struct ttm_resource *mem); #endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 134d09ef77..818680c6a8 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -29,6 +29,7 @@ #include #include +#include struct ttm_bo_device; struct ttm_tt; @@ -69,6 +70,18 @@ struct ttm_tt { enum ttm_caching caching; }; +/** + * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt. + * @base: Embedded struct ttm_kmap_iter providing the usage interface + * @tt: Cached struct ttm_tt. + * @prot: Cached page protection for mapping. + */ +struct ttm_kmap_iter_tt { + struct ttm_kmap_iter base; + struct ttm_tt *tt; + pgprot_t prot; +}; + static inline bool ttm_tt_is_populated(struct ttm_tt *tt) { return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED; @@ -157,8 +170,24 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper */ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm); +/** + * ttm_tt_mark_for_clear - Mark pages for clearing on populate. + * + * @ttm: Pointer to the ttm_tt structure + * + * Marks pages for clearing so that the next time the page vector is + * populated, the pages will be cleared.
+ */ +static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm) +{ + ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; +} + void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages); +struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, + struct ttm_tt *tt); + #if IS_ENABLED(CONFIG_AGP) #include diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h index a250a52a61..a237eb26ac 100644 --- a/include/dt-bindings/clock/actions,s500-cmu.h +++ b/include/dt-bindings/clock/actions,s500-cmu.h @@ -74,10 +74,12 @@ #define CLK_RMII_REF 54 #define CLK_GPIO 55 -/* system clock (part 2) */ +/* additional clocks */ #define CLK_APB 56 #define CLK_DMAC 57 +#define CLK_NIC 58 +#define CLK_ETHERNET 59 -#define CLK_NR_CLKS (CLK_DMAC + 1) +#define CLK_NR_CLKS (CLK_ETHERNET + 1) #endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */ diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h new file mode 100644 index 0000000000..5fe7689010 --- /dev/null +++ b/include/dt-bindings/clock/hi3559av100-clock.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later or BSD-2-Clause */ +/* + * Copyright (c) 2019-2020, Huawei Tech. Co., Ltd. + * + * Author: Dongjiu Geng + */ + +#ifndef __DTS_HI3559AV100_CLOCK_H +#define __DTS_HI3559AV100_CLOCK_H + +/* fixed rate */ +#define HI3559AV100_FIXED_1188M 1 +#define HI3559AV100_FIXED_1000M 2 +#define HI3559AV100_FIXED_842M 3 +#define HI3559AV100_FIXED_792M 4 +#define HI3559AV100_FIXED_750M 5 +#define HI3559AV100_FIXED_710M 6 +#define HI3559AV100_FIXED_680M 7 +#define HI3559AV100_FIXED_667M 8 +#define HI3559AV100_FIXED_631M 9 +#define HI3559AV100_FIXED_600M 10 +#define HI3559AV100_FIXED_568M 11 +#define HI3559AV100_FIXED_500M 12 +#define HI3559AV100_FIXED_475M 13 +#define HI3559AV100_FIXED_428M 14 +#define HI3559AV100_FIXED_400M 15 +#define HI3559AV100_FIXED_396M 16 +#define HI3559AV100_FIXED_300M 17 +#define HI3559AV100_FIXED_250M 18 +#define HI3559AV100_FIXED_198M 19 +#define HI3559AV100_FIXED_187p5M 20 +#define HI3559AV100_FIXED_150M 21 +#define HI3559AV100_FIXED_148p5M 22 +#define HI3559AV100_FIXED_125M 23 +#define HI3559AV100_FIXED_107M 24 +#define HI3559AV100_FIXED_100M 25 +#define HI3559AV100_FIXED_99M 26 +#define HI3559AV100_FIXED_74p25M 27 +#define HI3559AV100_FIXED_72M 28 +#define HI3559AV100_FIXED_60M 29 +#define HI3559AV100_FIXED_54M 30 +#define HI3559AV100_FIXED_50M 31 +#define HI3559AV100_FIXED_49p5M 32 +#define HI3559AV100_FIXED_37p125M 33 +#define HI3559AV100_FIXED_36M 34 +#define HI3559AV100_FIXED_32p4M 35 +#define HI3559AV100_FIXED_27M 36 +#define HI3559AV100_FIXED_25M 37 +#define HI3559AV100_FIXED_24M 38 +#define HI3559AV100_FIXED_12M 39 +#define HI3559AV100_FIXED_3M 40 +#define HI3559AV100_FIXED_1p6M 41 +#define HI3559AV100_FIXED_400K 42 +#define HI3559AV100_FIXED_100K 43 +#define HI3559AV100_FIXED_200M 44 +#define HI3559AV100_FIXED_75M 75 + +#define HI3559AV100_I2C0_CLK 50 +#define HI3559AV100_I2C1_CLK 51 +#define HI3559AV100_I2C2_CLK 52 +#define HI3559AV100_I2C3_CLK 53 +#define HI3559AV100_I2C4_CLK 54 +#define HI3559AV100_I2C5_CLK 55 +#define HI3559AV100_I2C6_CLK 56 +#define HI3559AV100_I2C7_CLK 57 +#define HI3559AV100_I2C8_CLK 58 +#define HI3559AV100_I2C9_CLK 59 +#define HI3559AV100_I2C10_CLK 60 +#define HI3559AV100_I2C11_CLK 61 + +#define HI3559AV100_SPI0_CLK 62 +#define HI3559AV100_SPI1_CLK 63 +#define HI3559AV100_SPI2_CLK 64 +#define HI3559AV100_SPI3_CLK 65 +#define HI3559AV100_SPI4_CLK 66 +#define HI3559AV100_SPI5_CLK 
67 +#define HI3559AV100_SPI6_CLK 68 + +#define HI3559AV100_EDMAC_CLK 69 +#define HI3559AV100_EDMAC_AXICLK 70 +#define HI3559AV100_EDMAC1_CLK 71 +#define HI3559AV100_EDMAC1_AXICLK 72 +#define HI3559AV100_VDMAC_CLK 73 + +/* mux clocks */ +#define HI3559AV100_FMC_MUX 80 +#define HI3559AV100_SYSAPB_MUX 81 +#define HI3559AV100_UART_MUX 82 +#define HI3559AV100_SYSBUS_MUX 83 +#define HI3559AV100_A73_MUX 84 +#define HI3559AV100_MMC0_MUX 85 +#define HI3559AV100_MMC1_MUX 86 +#define HI3559AV100_MMC2_MUX 87 +#define HI3559AV100_MMC3_MUX 88 + +/* gate clocks */ +#define HI3559AV100_FMC_CLK 90 +#define HI3559AV100_UART0_CLK 91 +#define HI3559AV100_UART1_CLK 92 +#define HI3559AV100_UART2_CLK 93 +#define HI3559AV100_UART3_CLK 94 +#define HI3559AV100_UART4_CLK 95 +#define HI3559AV100_MMC0_CLK 96 +#define HI3559AV100_MMC1_CLK 97 +#define HI3559AV100_MMC2_CLK 98 +#define HI3559AV100_MMC3_CLK 99 + +#define HI3559AV100_ETH_CLK 100 +#define HI3559AV100_ETH_MACIF_CLK 101 +#define HI3559AV100_ETH1_CLK 102 +#define HI3559AV100_ETH1_MACIF_CLK 103 + +/* complex */ +#define HI3559AV100_MAC0_CLK 110 +#define HI3559AV100_MAC1_CLK 111 +#define HI3559AV100_SATA_CLK 112 +#define HI3559AV100_USB_CLK 113 +#define HI3559AV100_USB1_CLK 114 + +/* pll clocks */ +#define HI3559AV100_APLL_CLK 250 +#define HI3559AV100_GPLL_CLK 251 + +#define HI3559AV100_CRG_NR_CLKS 256 + +#define HI3559AV100_SHUB_SOURCE_SOC_24M 0 +#define HI3559AV100_SHUB_SOURCE_SOC_200M 1 +#define HI3559AV100_SHUB_SOURCE_SOC_300M 2 +#define HI3559AV100_SHUB_SOURCE_PLL 3 +#define HI3559AV100_SHUB_SOURCE_CLK 4 + +#define HI3559AV100_SHUB_I2C0_CLK 10 +#define HI3559AV100_SHUB_I2C1_CLK 11 +#define HI3559AV100_SHUB_I2C2_CLK 12 +#define HI3559AV100_SHUB_I2C3_CLK 13 +#define HI3559AV100_SHUB_I2C4_CLK 14 +#define HI3559AV100_SHUB_I2C5_CLK 15 +#define HI3559AV100_SHUB_I2C6_CLK 16 +#define HI3559AV100_SHUB_I2C7_CLK 17 + +#define HI3559AV100_SHUB_SPI_SOURCE_CLK 20 +#define HI3559AV100_SHUB_SPI4_SOURCE_CLK 21 +#define HI3559AV100_SHUB_SPI0_CLK 22 +#define HI3559AV100_SHUB_SPI1_CLK 23 +#define HI3559AV100_SHUB_SPI2_CLK 24 +#define HI3559AV100_SHUB_SPI3_CLK 25 +#define HI3559AV100_SHUB_SPI4_CLK 26 + +#define HI3559AV100_SHUB_UART_CLK_32K 30 +#define HI3559AV100_SHUB_UART_SOURCE_CLK 31 +#define HI3559AV100_SHUB_UART_DIV_CLK 32 +#define HI3559AV100_SHUB_UART0_CLK 33 +#define HI3559AV100_SHUB_UART1_CLK 34 +#define HI3559AV100_SHUB_UART2_CLK 35 +#define HI3559AV100_SHUB_UART3_CLK 36 +#define HI3559AV100_SHUB_UART4_CLK 37 +#define HI3559AV100_SHUB_UART5_CLK 38 +#define HI3559AV100_SHUB_UART6_CLK 39 + +#define HI3559AV100_SHUB_EDMAC_CLK 40 + +#define HI3559AV100_SHUB_NR_CLKS 50 + +#endif /* __DTS_HI3559AV100_CLOCK_H */ + diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h index 82b1fc8d1e..2e60ce4d26 100644 --- a/include/dt-bindings/clock/imx8-clock.h +++ b/include/dt-bindings/clock/imx8-clock.h @@ -7,134 +7,6 @@ #ifndef __DT_BINDINGS_CLOCK_IMX_H #define __DT_BINDINGS_CLOCK_IMX_H -/* SCU Clocks */ - -#define IMX_CLK_DUMMY 0 - -/* CPU */ -#define IMX_A35_CLK 1 - -/* LSIO SS */ -#define IMX_LSIO_MEM_CLK 2 -#define IMX_LSIO_BUS_CLK 3 -#define IMX_LSIO_PWM0_CLK 10 -#define IMX_LSIO_PWM1_CLK 11 -#define IMX_LSIO_PWM2_CLK 12 -#define IMX_LSIO_PWM3_CLK 13 -#define IMX_LSIO_PWM4_CLK 14 -#define IMX_LSIO_PWM5_CLK 15 -#define IMX_LSIO_PWM6_CLK 16 -#define IMX_LSIO_PWM7_CLK 17 -#define IMX_LSIO_GPT0_CLK 18 -#define IMX_LSIO_GPT1_CLK 19 -#define IMX_LSIO_GPT2_CLK 20 -#define IMX_LSIO_GPT3_CLK 21 -#define IMX_LSIO_GPT4_CLK 22 -#define IMX_LSIO_FSPI0_CLK 
23 -#define IMX_LSIO_FSPI1_CLK 24 - -/* Connectivity SS */ -#define IMX_CONN_AXI_CLK_ROOT 30 -#define IMX_CONN_AHB_CLK_ROOT 31 -#define IMX_CONN_IPG_CLK_ROOT 32 -#define IMX_CONN_SDHC0_CLK 40 -#define IMX_CONN_SDHC1_CLK 41 -#define IMX_CONN_SDHC2_CLK 42 -#define IMX_CONN_ENET0_ROOT_CLK 43 -#define IMX_CONN_ENET0_BYPASS_CLK 44 -#define IMX_CONN_ENET0_RGMII_CLK 45 -#define IMX_CONN_ENET1_ROOT_CLK 46 -#define IMX_CONN_ENET1_BYPASS_CLK 47 -#define IMX_CONN_ENET1_RGMII_CLK 48 -#define IMX_CONN_GPMI_BCH_IO_CLK 49 -#define IMX_CONN_GPMI_BCH_CLK 50 -#define IMX_CONN_USB2_ACLK 51 -#define IMX_CONN_USB2_BUS_CLK 52 -#define IMX_CONN_USB2_LPM_CLK 53 - -/* HSIO SS */ -#define IMX_HSIO_AXI_CLK 60 -#define IMX_HSIO_PER_CLK 61 - -/* Display controller SS */ -#define IMX_DC_AXI_EXT_CLK 70 -#define IMX_DC_AXI_INT_CLK 71 -#define IMX_DC_CFG_CLK 72 -#define IMX_DC0_PLL0_CLK 80 -#define IMX_DC0_PLL1_CLK 81 -#define IMX_DC0_DISP0_CLK 82 -#define IMX_DC0_DISP1_CLK 83 -#define IMX_DC0_BYPASS0_CLK 84 -#define IMX_DC0_BYPASS1_CLK 85 - -/* MIPI-LVDS SS */ -#define IMX_MIPI_IPG_CLK 90 -#define IMX_MIPI0_PIXEL_CLK 100 -#define IMX_MIPI0_BYPASS_CLK 101 -#define IMX_MIPI0_LVDS_PIXEL_CLK 102 -#define IMX_MIPI0_LVDS_BYPASS_CLK 103 -#define IMX_MIPI0_LVDS_PHY_CLK 104 -#define IMX_MIPI0_I2C0_CLK 105 -#define IMX_MIPI0_I2C1_CLK 106 -#define IMX_MIPI0_PWM0_CLK 107 -#define IMX_MIPI1_PIXEL_CLK 108 -#define IMX_MIPI1_BYPASS_CLK 109 -#define IMX_MIPI1_LVDS_PIXEL_CLK 110 -#define IMX_MIPI1_LVDS_BYPASS_CLK 111 -#define IMX_MIPI1_LVDS_PHY_CLK 112 -#define IMX_MIPI1_I2C0_CLK 113 -#define IMX_MIPI1_I2C1_CLK 114 -#define IMX_MIPI1_PWM0_CLK 115 - -/* IMG SS */ -#define IMX_IMG_AXI_CLK 120 -#define IMX_IMG_IPG_CLK 121 -#define IMX_IMG_PXL_CLK 122 - -/* MIPI-CSI SS */ -#define IMX_CSI0_CORE_CLK 130 -#define IMX_CSI0_ESC_CLK 131 -#define IMX_CSI0_PWM0_CLK 132 -#define IMX_CSI0_I2C0_CLK 133 - -/* PARALLER CSI SS */ -#define IMX_PARALLEL_CSI_DPLL_CLK 140 -#define IMX_PARALLEL_CSI_PIXEL_CLK 141 -#define IMX_PARALLEL_CSI_MCLK_CLK 142 - -/* VPU SS */ -#define IMX_VPU_ENC_CLK 150 -#define IMX_VPU_DEC_CLK 151 - -/* GPU SS */ -#define IMX_GPU0_CORE_CLK 160 -#define IMX_GPU0_SHADER_CLK 161 - -/* ADMA SS */ -#define IMX_ADMA_IPG_CLK_ROOT 165 -#define IMX_ADMA_UART0_CLK 170 -#define IMX_ADMA_UART1_CLK 171 -#define IMX_ADMA_UART2_CLK 172 -#define IMX_ADMA_UART3_CLK 173 -#define IMX_ADMA_SPI0_CLK 174 -#define IMX_ADMA_SPI1_CLK 175 -#define IMX_ADMA_SPI2_CLK 176 -#define IMX_ADMA_SPI3_CLK 177 -#define IMX_ADMA_CAN0_CLK 178 -#define IMX_ADMA_CAN1_CLK 179 -#define IMX_ADMA_CAN2_CLK 180 -#define IMX_ADMA_I2C0_CLK 181 -#define IMX_ADMA_I2C1_CLK 182 -#define IMX_ADMA_I2C2_CLK 183 -#define IMX_ADMA_I2C3_CLK 184 -#define IMX_ADMA_FTM0_CLK 185 -#define IMX_ADMA_FTM1_CLK 186 -#define IMX_ADMA_ADC0_CLK 187 -#define IMX_ADMA_PWM_CLK 188 -#define IMX_ADMA_LCD_CLK 189 - -#define IMX_SCU_CLK_END 190 - /* LPCG clocks */ /* LSIO SS LPCG */ diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index 82e907ce7b..afa74d7ba1 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -405,25 +405,6 @@ #define IMX8MQ_VIDEO2_PLL1_REF_SEL 266 -#define IMX8MQ_SYS1_PLL_40M_CG 267 -#define IMX8MQ_SYS1_PLL_80M_CG 268 -#define IMX8MQ_SYS1_PLL_100M_CG 269 -#define IMX8MQ_SYS1_PLL_133M_CG 270 -#define IMX8MQ_SYS1_PLL_160M_CG 271 -#define IMX8MQ_SYS1_PLL_200M_CG 272 -#define IMX8MQ_SYS1_PLL_266M_CG 273 -#define IMX8MQ_SYS1_PLL_400M_CG 274 -#define IMX8MQ_SYS1_PLL_800M_CG 275 -#define 
IMX8MQ_SYS2_PLL_50M_CG 276 -#define IMX8MQ_SYS2_PLL_100M_CG 277 -#define IMX8MQ_SYS2_PLL_125M_CG 278 -#define IMX8MQ_SYS2_PLL_166M_CG 279 -#define IMX8MQ_SYS2_PLL_200M_CG 280 -#define IMX8MQ_SYS2_PLL_250M_CG 281 -#define IMX8MQ_SYS2_PLL_333M_CG 282 -#define IMX8MQ_SYS2_PLL_500M_CG 283 -#define IMX8MQ_SYS2_PLL_1000M_CG 284 - #define IMX8MQ_CLK_GPU_CORE 285 #define IMX8MQ_CLK_GPU_SHADER 286 #define IMX8MQ_CLK_M4_CORE 287 diff --git a/include/dt-bindings/clock/jz4760-cgu.h b/include/dt-bindings/clock/jz4760-cgu.h new file mode 100644 index 0000000000..4bb2e19c47 --- /dev/null +++ b/include/dt-bindings/clock/jz4760-cgu.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4760-cgu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ + +#define JZ4760_CLK_EXT 0 +#define JZ4760_CLK_OSC32K 1 +#define JZ4760_CLK_PLL0 2 +#define JZ4760_CLK_PLL0_HALF 3 +#define JZ4760_CLK_PLL1 4 +#define JZ4760_CLK_CCLK 5 +#define JZ4760_CLK_HCLK 6 +#define JZ4760_CLK_SCLK 7 +#define JZ4760_CLK_H2CLK 8 +#define JZ4760_CLK_MCLK 9 +#define JZ4760_CLK_PCLK 10 +#define JZ4760_CLK_MMC_MUX 11 +#define JZ4760_CLK_MMC0 12 +#define JZ4760_CLK_MMC1 13 +#define JZ4760_CLK_MMC2 14 +#define JZ4760_CLK_CIM 15 +#define JZ4760_CLK_UHC 16 +#define JZ4760_CLK_GPU 17 +#define JZ4760_CLK_GPS 18 +#define JZ4760_CLK_SSI_MUX 19 +#define JZ4760_CLK_PCM 20 +#define JZ4760_CLK_I2S 21 +#define JZ4760_CLK_OTG 22 +#define JZ4760_CLK_SSI0 23 +#define JZ4760_CLK_SSI1 24 +#define JZ4760_CLK_SSI2 25 +#define JZ4760_CLK_DMA 26 +#define JZ4760_CLK_I2C0 27 +#define JZ4760_CLK_I2C1 28 +#define JZ4760_CLK_UART0 29 +#define JZ4760_CLK_UART1 30 +#define JZ4760_CLK_UART2 31 +#define JZ4760_CLK_UART3 32 +#define JZ4760_CLK_IPU 33 +#define JZ4760_CLK_ADC 34 +#define JZ4760_CLK_AIC 35 +#define JZ4760_CLK_VPU 36 +#define JZ4760_CLK_UHC_PHY 37 +#define JZ4760_CLK_OTG_PHY 38 +#define JZ4760_CLK_EXT512 39 +#define JZ4760_CLK_RTC 40 +#define JZ4760_CLK_LPCLK_DIV 41 +#define JZ4760_CLK_TVE 42 +#define JZ4760_CLK_LPCLK 43 + +#endif /* __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ */ diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h index 3acebe937b..3d00c98b96 100644 --- a/include/dt-bindings/clock/mt8173-clk.h +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -186,7 +186,6 @@ #define CLK_INFRA_PMICWRAP 11 #define CLK_INFRA_CLK_13M 12 #define CLK_INFRA_CA53SEL 13 -#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */ #define CLK_INFRA_CA72SEL 14 #define CLK_INFRA_NR_CLK 15 diff --git a/include/dt-bindings/clock/qcom,camcc-sm8250.h b/include/dt-bindings/clock/qcom,camcc-sm8250.h new file mode 100644 index 0000000000..383ead1760 --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-sm8250.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_AREG_CLK 1 +#define CAM_CC_BPS_AXI_CLK 2 +#define CAM_CC_BPS_CLK 3 +#define CAM_CC_BPS_CLK_SRC 4 +#define CAM_CC_CAMNOC_AXI_CLK 5 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 6 +#define CAM_CC_CAMNOC_DCD_XO_CLK 7 +#define CAM_CC_CCI_0_CLK 8 +#define CAM_CC_CCI_0_CLK_SRC 9 +#define CAM_CC_CCI_1_CLK 10 +#define CAM_CC_CCI_1_CLK_SRC 11 +#define CAM_CC_CORE_AHB_CLK 12 +#define CAM_CC_CPAS_AHB_CLK 13 +#define CAM_CC_CPHY_RX_CLK_SRC 14 +#define CAM_CC_CSI0PHYTIMER_CLK 15 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16 +#define CAM_CC_CSI1PHYTIMER_CLK 17 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18 +#define CAM_CC_CSI2PHYTIMER_CLK 19 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20 +#define CAM_CC_CSI3PHYTIMER_CLK 21 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22 +#define CAM_CC_CSI4PHYTIMER_CLK 23 +#define CAM_CC_CSI4PHYTIMER_CLK_SRC 24 +#define CAM_CC_CSI5PHYTIMER_CLK 25 +#define CAM_CC_CSI5PHYTIMER_CLK_SRC 26 +#define CAM_CC_CSIPHY0_CLK 27 +#define CAM_CC_CSIPHY1_CLK 28 +#define CAM_CC_CSIPHY2_CLK 29 +#define CAM_CC_CSIPHY3_CLK 30 +#define CAM_CC_CSIPHY4_CLK 31 +#define CAM_CC_CSIPHY5_CLK 32 +#define CAM_CC_FAST_AHB_CLK_SRC 33 +#define CAM_CC_FD_CORE_CLK 34 +#define CAM_CC_FD_CORE_CLK_SRC 35 +#define CAM_CC_FD_CORE_UAR_CLK 36 +#define CAM_CC_GDSC_CLK 37 +#define CAM_CC_ICP_AHB_CLK 38 +#define CAM_CC_ICP_CLK 39 +#define CAM_CC_ICP_CLK_SRC 40 +#define CAM_CC_IFE_0_AHB_CLK 41 +#define CAM_CC_IFE_0_AREG_CLK 42 +#define CAM_CC_IFE_0_AXI_CLK 43 +#define CAM_CC_IFE_0_CLK 44 +#define CAM_CC_IFE_0_CLK_SRC 45 +#define CAM_CC_IFE_0_CPHY_RX_CLK 46 +#define CAM_CC_IFE_0_CSID_CLK 47 +#define CAM_CC_IFE_0_CSID_CLK_SRC 48 +#define CAM_CC_IFE_0_DSP_CLK 49 +#define CAM_CC_IFE_1_AHB_CLK 50 +#define CAM_CC_IFE_1_AREG_CLK 51 +#define CAM_CC_IFE_1_AXI_CLK 52 +#define CAM_CC_IFE_1_CLK 53 +#define CAM_CC_IFE_1_CLK_SRC 54 +#define CAM_CC_IFE_1_CPHY_RX_CLK 55 +#define CAM_CC_IFE_1_CSID_CLK 56 +#define CAM_CC_IFE_1_CSID_CLK_SRC 57 +#define CAM_CC_IFE_1_DSP_CLK 58 +#define CAM_CC_IFE_LITE_AHB_CLK 59 +#define CAM_CC_IFE_LITE_AXI_CLK 60 +#define CAM_CC_IFE_LITE_CLK 61 +#define CAM_CC_IFE_LITE_CLK_SRC 62 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 63 +#define CAM_CC_IFE_LITE_CSID_CLK 64 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 65 +#define CAM_CC_IPE_0_AHB_CLK 66 +#define CAM_CC_IPE_0_AREG_CLK 67 +#define CAM_CC_IPE_0_AXI_CLK 68 +#define CAM_CC_IPE_0_CLK 69 +#define CAM_CC_IPE_0_CLK_SRC 70 +#define CAM_CC_JPEG_CLK 71 +#define CAM_CC_JPEG_CLK_SRC 72 +#define CAM_CC_MCLK0_CLK 73 +#define CAM_CC_MCLK0_CLK_SRC 74 +#define CAM_CC_MCLK1_CLK 75 +#define CAM_CC_MCLK1_CLK_SRC 76 +#define CAM_CC_MCLK2_CLK 77 +#define CAM_CC_MCLK2_CLK_SRC 78 +#define CAM_CC_MCLK3_CLK 79 +#define CAM_CC_MCLK3_CLK_SRC 80 +#define CAM_CC_MCLK4_CLK 81 +#define CAM_CC_MCLK4_CLK_SRC 82 +#define CAM_CC_MCLK5_CLK 83 +#define CAM_CC_MCLK5_CLK_SRC 84 +#define CAM_CC_MCLK6_CLK 85 +#define CAM_CC_MCLK6_CLK_SRC 86 +#define CAM_CC_PLL0 87 +#define CAM_CC_PLL0_OUT_EVEN 88 +#define CAM_CC_PLL0_OUT_ODD 89 +#define CAM_CC_PLL1 90 +#define CAM_CC_PLL1_OUT_EVEN 91 +#define CAM_CC_PLL2 92 +#define CAM_CC_PLL2_OUT_MAIN 93 +#define CAM_CC_PLL3 94 +#define CAM_CC_PLL3_OUT_EVEN 95 +#define CAM_CC_PLL4 96 +#define CAM_CC_PLL4_OUT_EVEN 97 +#define CAM_CC_SBI_AHB_CLK 98 +#define CAM_CC_SBI_AXI_CLK 99 +#define CAM_CC_SBI_CLK 100 +#define CAM_CC_SBI_CPHY_RX_CLK 101 +#define CAM_CC_SBI_CSID_CLK 102 +#define CAM_CC_SBI_CSID_CLK_SRC 103 +#define 
CAM_CC_SBI_DIV_CLK_SRC 104 +#define CAM_CC_SBI_IFE_0_CLK 105 +#define CAM_CC_SBI_IFE_1_CLK 106 +#define CAM_CC_SLEEP_CLK 107 +#define CAM_CC_SLEEP_CLK_SRC 108 +#define CAM_CC_SLOW_AHB_CLK_SRC 109 +#define CAM_CC_XO_CLK_SRC 110 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_ICP_BCR 1 +#define CAM_CC_IFE_0_BCR 2 +#define CAM_CC_IFE_1_BCR 3 +#define CAM_CC_IPE_0_BCR 4 +#define CAM_CC_SBI_BCR 5 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define SBI_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define TITAN_TOP_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8150.h b/include/dt-bindings/clock/qcom,dispcc-sm8150.h index fdaca6ad5c..ce001cbbc2 100644 --- a/include/dt-bindings/clock/qcom,dispcc-sm8150.h +++ b/include/dt-bindings/clock/qcom,dispcc-sm8150.h @@ -55,6 +55,15 @@ #define DISP_CC_MDSS_VSYNC_CLK_SRC 45 #define DISP_CC_PLL0 46 #define DISP_CC_PLL1 47 +#define DISP_CC_MDSS_EDP_AUX_CLK 48 +#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 49 +#define DISP_CC_MDSS_EDP_GTC_CLK 50 +#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 51 +#define DISP_CC_MDSS_EDP_LINK_CLK 52 +#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 53 +#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 54 +#define DISP_CC_MDSS_EDP_PIXEL_CLK 55 +#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 56 /* DISP_CC Reset */ #define DISP_CC_MDSS_CORE_BCR 0 diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8250.h b/include/dt-bindings/clock/qcom,dispcc-sm8250.h index fdaca6ad5c..ce001cbbc2 100644 --- a/include/dt-bindings/clock/qcom,dispcc-sm8250.h +++ b/include/dt-bindings/clock/qcom,dispcc-sm8250.h @@ -55,6 +55,15 @@ #define DISP_CC_MDSS_VSYNC_CLK_SRC 45 #define DISP_CC_PLL0 46 #define DISP_CC_PLL1 47 +#define DISP_CC_MDSS_EDP_AUX_CLK 48 +#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 49 +#define DISP_CC_MDSS_EDP_GTC_CLK 50 +#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 51 +#define DISP_CC_MDSS_EDP_LINK_CLK 52 +#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 53 +#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 54 +#define DISP_CC_MDSS_EDP_PIXEL_CLK 55 +#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 56 /* DISP_CC Reset */ #define DISP_CC_MDSS_CORE_BCR 0 diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9607.h b/include/dt-bindings/clock/qcom,gcc-mdm9607.h new file mode 100644 index 0000000000..357a680a40 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-mdm9607.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, Konrad Dybcio + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_9607_H +#define _DT_BINDINGS_CLK_MSM_GCC_9607_H + +#define GPLL0 0 +#define GPLL0_EARLY 1 +#define GPLL1 2 +#define GPLL1_VOTE 3 +#define GPLL2 4 +#define GPLL2_EARLY 5 +#define PCNOC_BFDCD_CLK_SRC 6 +#define SYSTEM_NOC_BFDCD_CLK_SRC 7 +#define GCC_SMMU_CFG_CLK 8 +#define APSS_AHB_CLK_SRC 9 +#define GCC_QDSS_DAP_CLK 10 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22 +#define BLSP1_UART1_APPS_CLK_SRC 23 +#define BLSP1_UART2_APPS_CLK_SRC 24 +#define CRYPTO_CLK_SRC 25 +#define GP1_CLK_SRC 26 +#define GP2_CLK_SRC 27 +#define GP3_CLK_SRC 28 +#define PDM2_CLK_SRC 29 +#define SDCC1_APPS_CLK_SRC 30 +#define 
SDCC2_APPS_CLK_SRC 31 +#define APSS_TCU_CLK_SRC 32 +#define USB_HS_SYSTEM_CLK_SRC 33 +#define GCC_BLSP1_AHB_CLK 34 +#define GCC_BLSP1_SLEEP_CLK 35 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 36 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 37 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 38 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 39 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 40 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 41 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 42 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 43 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 44 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 45 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 46 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 47 +#define GCC_BLSP1_UART1_APPS_CLK 48 +#define GCC_BLSP1_UART2_APPS_CLK 49 +#define GCC_BOOT_ROM_AHB_CLK 50 +#define GCC_CRYPTO_AHB_CLK 51 +#define GCC_CRYPTO_AXI_CLK 52 +#define GCC_CRYPTO_CLK 53 +#define GCC_GP1_CLK 54 +#define GCC_GP2_CLK 55 +#define GCC_GP3_CLK 56 +#define GCC_MSS_CFG_AHB_CLK 57 +#define GCC_PDM2_CLK 58 +#define GCC_PDM_AHB_CLK 59 +#define GCC_PRNG_AHB_CLK 60 +#define GCC_SDCC1_AHB_CLK 61 +#define GCC_SDCC1_APPS_CLK 62 +#define GCC_SDCC2_AHB_CLK 63 +#define GCC_SDCC2_APPS_CLK 64 +#define GCC_USB2A_PHY_SLEEP_CLK 65 +#define GCC_USB_HS_AHB_CLK 66 +#define GCC_USB_HS_SYSTEM_CLK 67 +#define GCC_APSS_TCU_CLK 68 +#define GCC_MSS_Q6_BIMC_AXI_CLK 69 +#define BIMC_PLL 70 +#define BIMC_PLL_VOTE 71 +#define BIMC_DDR_CLK_SRC 72 +#define BLSP1_UART3_APPS_CLK_SRC 73 +#define BLSP1_UART4_APPS_CLK_SRC 74 +#define BLSP1_UART5_APPS_CLK_SRC 75 +#define BLSP1_UART6_APPS_CLK_SRC 76 +#define GCC_BLSP1_UART3_APPS_CLK 77 +#define GCC_BLSP1_UART4_APPS_CLK 78 +#define GCC_BLSP1_UART5_APPS_CLK 79 +#define GCC_BLSP1_UART6_APPS_CLK 80 +#define GCC_APSS_AHB_CLK 81 +#define GCC_APSS_AXI_CLK 82 +#define GCC_USB_HS_PHY_CFG_AHB_CLK 83 +#define GCC_USB_HSIC_CLK_SRC 84 +#define GCC_USB_HSIC_IO_CAL_CLK_SRC 85 +#define GCC_USB_HSIC_SYSTEM_CLK_SRC 86 + +/* Resets */ +#define USB2_HS_PHY_ONLY_BCR 0 +#define QUSB2_PHY_BCR 1 +#define GCC_MSS_RESTART 2 +#define USB_HS_HSIC_BCR 3 +#define USB_HS_BCR 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm6125.h b/include/dt-bindings/clock/qcom,gcc-sm6125.h new file mode 100644 index 0000000000..08ea180868 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm6125.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, Konrad Dybcio + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H + +#define GPLL0_OUT_AUX2 0 +#define GPLL0_OUT_MAIN 1 +#define GPLL6_OUT_MAIN 2 +#define GPLL7_OUT_MAIN 3 +#define GPLL8_OUT_MAIN 4 +#define GPLL9_OUT_MAIN 5 +#define GPLL0_OUT_EARLY 6 +#define GPLL3_OUT_EARLY 7 +#define GPLL4_OUT_MAIN 8 +#define GPLL5_OUT_MAIN 9 +#define GPLL6_OUT_EARLY 10 +#define GPLL7_OUT_EARLY 11 +#define GPLL8_OUT_EARLY 12 +#define GPLL9_OUT_EARLY 13 +#define GCC_AHB2PHY_CSI_CLK 14 +#define GCC_AHB2PHY_USB_CLK 15 +#define GCC_APC_VS_CLK 16 +#define GCC_BOOT_ROM_AHB_CLK 17 +#define GCC_CAMERA_AHB_CLK 18 +#define GCC_CAMERA_XO_CLK 19 +#define GCC_CAMSS_AHB_CLK_SRC 20 +#define GCC_CAMSS_CCI_AHB_CLK 21 +#define GCC_CAMSS_CCI_CLK 22 +#define GCC_CAMSS_CCI_CLK_SRC 23 +#define GCC_CAMSS_CPHY_CSID0_CLK 24 +#define GCC_CAMSS_CPHY_CSID1_CLK 25 +#define GCC_CAMSS_CPHY_CSID2_CLK 26 +#define GCC_CAMSS_CPHY_CSID3_CLK 27 +#define GCC_CAMSS_CPP_AHB_CLK 28 +#define GCC_CAMSS_CPP_AXI_CLK 29 +#define GCC_CAMSS_CPP_CLK 30 +#define GCC_CAMSS_CPP_CLK_SRC 31 +#define GCC_CAMSS_CPP_VBIF_AHB_CLK 32 +#define GCC_CAMSS_CSI0_AHB_CLK 33 +#define GCC_CAMSS_CSI0_CLK 34 
+#define GCC_CAMSS_CSI0_CLK_SRC 35 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 36 +#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 37 +#define GCC_CAMSS_CSI0PIX_CLK 38 +#define GCC_CAMSS_CSI0RDI_CLK 39 +#define GCC_CAMSS_CSI1_AHB_CLK 40 +#define GCC_CAMSS_CSI1_CLK 41 +#define GCC_CAMSS_CSI1_CLK_SRC 42 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 43 +#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 44 +#define GCC_CAMSS_CSI1PIX_CLK 45 +#define GCC_CAMSS_CSI1RDI_CLK 46 +#define GCC_CAMSS_CSI2_AHB_CLK 47 +#define GCC_CAMSS_CSI2_CLK 48 +#define GCC_CAMSS_CSI2_CLK_SRC 49 +#define GCC_CAMSS_CSI2PHYTIMER_CLK 50 +#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 51 +#define GCC_CAMSS_CSI2PIX_CLK 52 +#define GCC_CAMSS_CSI2RDI_CLK 53 +#define GCC_CAMSS_CSI3_AHB_CLK 54 +#define GCC_CAMSS_CSI3_CLK 55 +#define GCC_CAMSS_CSI3_CLK_SRC 56 +#define GCC_CAMSS_CSI3PIX_CLK 57 +#define GCC_CAMSS_CSI3RDI_CLK 58 +#define GCC_CAMSS_CSI_VFE0_CLK 59 +#define GCC_CAMSS_CSI_VFE1_CLK 60 +#define GCC_CAMSS_CSIPHY0_CLK 61 +#define GCC_CAMSS_CSIPHY1_CLK 62 +#define GCC_CAMSS_CSIPHY2_CLK 63 +#define GCC_CAMSS_CSIPHY_CLK_SRC 64 +#define GCC_CAMSS_GP0_CLK 65 +#define GCC_CAMSS_GP0_CLK_SRC 66 +#define GCC_CAMSS_GP1_CLK 67 +#define GCC_CAMSS_GP1_CLK_SRC 68 +#define GCC_CAMSS_ISPIF_AHB_CLK 69 +#define GCC_CAMSS_JPEG_AHB_CLK 70 +#define GCC_CAMSS_JPEG_AXI_CLK 71 +#define GCC_CAMSS_JPEG_CLK 72 +#define GCC_CAMSS_JPEG_CLK_SRC 73 +#define GCC_CAMSS_MCLK0_CLK 74 +#define GCC_CAMSS_MCLK0_CLK_SRC 75 +#define GCC_CAMSS_MCLK1_CLK 76 +#define GCC_CAMSS_MCLK1_CLK_SRC 77 +#define GCC_CAMSS_MCLK2_CLK 78 +#define GCC_CAMSS_MCLK2_CLK_SRC 79 +#define GCC_CAMSS_MCLK3_CLK 80 +#define GCC_CAMSS_MCLK3_CLK_SRC 81 +#define GCC_CAMSS_MICRO_AHB_CLK 82 +#define GCC_CAMSS_THROTTLE_NRT_AXI_CLK 83 +#define GCC_CAMSS_THROTTLE_RT_AXI_CLK 84 +#define GCC_CAMSS_TOP_AHB_CLK 85 +#define GCC_CAMSS_VFE0_AHB_CLK 86 +#define GCC_CAMSS_VFE0_CLK 87 +#define GCC_CAMSS_VFE0_CLK_SRC 88 +#define GCC_CAMSS_VFE0_STREAM_CLK 89 +#define GCC_CAMSS_VFE1_AHB_CLK 90 +#define GCC_CAMSS_VFE1_CLK 91 +#define GCC_CAMSS_VFE1_CLK_SRC 92 +#define GCC_CAMSS_VFE1_STREAM_CLK 93 +#define GCC_CAMSS_VFE_TSCTR_CLK 94 +#define GCC_CAMSS_VFE_VBIF_AHB_CLK 95 +#define GCC_CAMSS_VFE_VBIF_AXI_CLK 96 +#define GCC_CE1_AHB_CLK 97 +#define GCC_CE1_AXI_CLK 98 +#define GCC_CE1_CLK 99 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 100 +#define GCC_CPUSS_GNOC_CLK 101 +#define GCC_DISP_AHB_CLK 102 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 103 +#define GCC_DISP_HF_AXI_CLK 104 +#define GCC_DISP_THROTTLE_CORE_CLK 105 +#define GCC_DISP_XO_CLK 106 +#define GCC_GP1_CLK 107 +#define GCC_GP1_CLK_SRC 108 +#define GCC_GP2_CLK 109 +#define GCC_GP2_CLK_SRC 110 +#define GCC_GP3_CLK 111 +#define GCC_GP3_CLK_SRC 112 +#define GCC_GPU_CFG_AHB_CLK 113 +#define GCC_GPU_GPLL0_CLK_SRC 114 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 115 +#define GCC_GPU_MEMNOC_GFX_CLK 116 +#define GCC_GPU_SNOC_DVM_GFX_CLK 117 +#define GCC_GPU_THROTTLE_CORE_CLK 118 +#define GCC_GPU_THROTTLE_XO_CLK 119 +#define GCC_MSS_VS_CLK 120 +#define GCC_PDM2_CLK 121 +#define GCC_PDM2_CLK_SRC 122 +#define GCC_PDM_AHB_CLK 123 +#define GCC_PDM_XO4_CLK 124 +#define GCC_PRNG_AHB_CLK 125 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 126 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 127 +#define GCC_QMIP_DISP_AHB_CLK 128 +#define GCC_QMIP_GPU_CFG_AHB_CLK 129 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 130 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 131 +#define GCC_QUPV3_WRAP0_CORE_CLK 132 +#define GCC_QUPV3_WRAP0_S0_CLK 133 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 134 +#define GCC_QUPV3_WRAP0_S1_CLK 135 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 136 +#define 
GCC_QUPV3_WRAP0_S2_CLK 137 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 138 +#define GCC_QUPV3_WRAP0_S3_CLK 139 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 140 +#define GCC_QUPV3_WRAP0_S4_CLK 141 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 142 +#define GCC_QUPV3_WRAP0_S5_CLK 143 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 144 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 145 +#define GCC_QUPV3_WRAP1_CORE_CLK 146 +#define GCC_QUPV3_WRAP1_S0_CLK 147 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 148 +#define GCC_QUPV3_WRAP1_S1_CLK 149 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 150 +#define GCC_QUPV3_WRAP1_S2_CLK 151 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 152 +#define GCC_QUPV3_WRAP1_S3_CLK 153 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 154 +#define GCC_QUPV3_WRAP1_S4_CLK 155 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 156 +#define GCC_QUPV3_WRAP1_S5_CLK 157 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 158 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 159 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 160 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 161 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 162 +#define GCC_SDCC1_AHB_CLK 163 +#define GCC_SDCC1_APPS_CLK 164 +#define GCC_SDCC1_APPS_CLK_SRC 165 +#define GCC_SDCC1_ICE_CORE_CLK 166 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 167 +#define GCC_SDCC2_AHB_CLK 168 +#define GCC_SDCC2_APPS_CLK 169 +#define GCC_SDCC2_APPS_CLK_SRC 170 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 171 +#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 172 +#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 173 +#define GCC_UFS_PHY_AHB_CLK 174 +#define GCC_UFS_PHY_AXI_CLK 175 +#define GCC_UFS_PHY_AXI_CLK_SRC 176 +#define GCC_UFS_PHY_ICE_CORE_CLK 177 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 178 +#define GCC_UFS_PHY_PHY_AUX_CLK 179 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 180 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 181 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 182 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 183 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 184 +#define GCC_USB30_PRIM_MASTER_CLK 185 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 186 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 187 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 188 +#define GCC_USB30_PRIM_SLEEP_CLK 189 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 190 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 191 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 192 +#define GCC_VDDA_VS_CLK 193 +#define GCC_VDDCX_VS_CLK 194 +#define GCC_VDDMX_VS_CLK 195 +#define GCC_VIDEO_AHB_CLK 196 +#define GCC_VIDEO_AXI0_CLK 197 +#define GCC_VIDEO_THROTTLE_CORE_CLK 198 +#define GCC_VIDEO_XO_CLK 199 +#define GCC_VS_CTRL_AHB_CLK 200 +#define GCC_VS_CTRL_CLK 201 +#define GCC_VS_CTRL_CLK_SRC 202 +#define GCC_VSENSOR_CLK_SRC 203 +#define GCC_WCSS_VS_CLK 204 +#define GCC_USB3_PRIM_CLKREF_CLK 205 +#define GCC_SYS_NOC_COMPUTE_SF_AXI_CLK 206 +#define GCC_BIMC_GPU_AXI_CLK 207 +#define GCC_UFS_MEM_CLKREF_CLK 208 + +/* GDSCs */ +#define USB30_PRIM_GDSC 0 +#define UFS_PHY_GDSC 1 +#define CAMSS_VFE0_GDSC 2 +#define CAMSS_VFE1_GDSC 3 +#define CAMSS_TOP_GDSC 4 +#define CAM_CPP_GDSC 5 +#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 6 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8 +#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 9 + +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_UFS_PHY_BCR 2 +#define GCC_USB30_PRIM_BCR 3 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 4 +#define GCC_USB3_PHY_PRIM_SP0_BCR 5 +#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 6 +#define GCC_CAMSS_MICRO_BCR 7 + +#endif diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h new file mode 100644 index 0000000000..0728ad07ff --- /dev/null +++ b/include/dt-bindings/clock/r9a07g044-cpg.h @@ -0,0 +1,219 @@ +/* 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2021 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A07G044 CPG Core Clocks */ +#define R9A07G044_CLK_I 0 +#define R9A07G044_CLK_I2 1 +#define R9A07G044_CLK_G 2 +#define R9A07G044_CLK_S0 3 +#define R9A07G044_CLK_S1 4 +#define R9A07G044_CLK_SPI0 5 +#define R9A07G044_CLK_SPI1 6 +#define R9A07G044_CLK_SD0 7 +#define R9A07G044_CLK_SD1 8 +#define R9A07G044_CLK_M0 9 +#define R9A07G044_CLK_M1 10 +#define R9A07G044_CLK_M2 11 +#define R9A07G044_CLK_M3 12 +#define R9A07G044_CLK_M4 13 +#define R9A07G044_CLK_HP 14 +#define R9A07G044_CLK_TSU 15 +#define R9A07G044_CLK_ZT 16 +#define R9A07G044_CLK_P0 17 +#define R9A07G044_CLK_P1 18 +#define R9A07G044_CLK_P2 19 +#define R9A07G044_CLK_AT 20 +#define R9A07G044_OSCCLK 21 + +/* R9A07G044 Module Clocks */ +#define R9A07G044_CA55_SCLK 0 +#define R9A07G044_CA55_PCLK 1 +#define R9A07G044_CA55_ATCLK 2 +#define R9A07G044_CA55_GICCLK 3 +#define R9A07G044_CA55_PERICLK 4 +#define R9A07G044_CA55_ACLK 5 +#define R9A07G044_CA55_TSCLK 6 +#define R9A07G044_GIC600_GICCLK 7 +#define R9A07G044_IA55_CLK 8 +#define R9A07G044_IA55_PCLK 9 +#define R9A07G044_MHU_PCLK 10 +#define R9A07G044_SYC_CNT_CLK 11 +#define R9A07G044_DMAC_ACLK 12 +#define R9A07G044_DMAC_PCLK 13 +#define R9A07G044_OSTM0_PCLK 14 +#define R9A07G044_OSTM1_PCLK 15 +#define R9A07G044_OSTM2_PCLK 16 +#define R9A07G044_MTU_X_MCK_MTU3 17 +#define R9A07G044_POE3_CLKM_POE 18 +#define R9A07G044_GPT_PCLK 19 +#define R9A07G044_POEG_A_CLKP 20 +#define R9A07G044_POEG_B_CLKP 21 +#define R9A07G044_POEG_C_CLKP 22 +#define R9A07G044_POEG_D_CLKP 23 +#define R9A07G044_WDT0_PCLK 24 +#define R9A07G044_WDT0_CLK 25 +#define R9A07G044_WDT1_PCLK 26 +#define R9A07G044_WDT1_CLK 27 +#define R9A07G044_WDT2_PCLK 28 +#define R9A07G044_WDT2_CLK 29 +#define R9A07G044_SPI_CLK2 30 +#define R9A07G044_SPI_CLK 31 +#define R9A07G044_SDHI0_IMCLK 32 +#define R9A07G044_SDHI0_IMCLK2 33 +#define R9A07G044_SDHI0_CLK_HS 34 +#define R9A07G044_SDHI0_ACLK 35 +#define R9A07G044_SDHI1_IMCLK 36 +#define R9A07G044_SDHI1_IMCLK2 37 +#define R9A07G044_SDHI1_CLK_HS 38 +#define R9A07G044_SDHI1_ACLK 39 +#define R9A07G044_GPU_CLK 40 +#define R9A07G044_GPU_AXI_CLK 41 +#define R9A07G044_GPU_ACE_CLK 42 +#define R9A07G044_ISU_ACLK 43 +#define R9A07G044_ISU_PCLK 44 +#define R9A07G044_H264_CLK_A 45 +#define R9A07G044_H264_CLK_P 46 +#define R9A07G044_CRU_SYSCLK 47 +#define R9A07G044_CRU_VCLK 48 +#define R9A07G044_CRU_PCLK 49 +#define R9A07G044_CRU_ACLK 50 +#define R9A07G044_MIPI_DSI_PLLCLK 51 +#define R9A07G044_MIPI_DSI_SYSCLK 52 +#define R9A07G044_MIPI_DSI_ACLK 53 +#define R9A07G044_MIPI_DSI_PCLK 54 +#define R9A07G044_MIPI_DSI_VCLK 55 +#define R9A07G044_MIPI_DSI_LPCLK 56 +#define R9A07G044_LCDC_CLK_A 57 +#define R9A07G044_LCDC_CLK_P 58 +#define R9A07G044_LCDC_CLK_D 59 +#define R9A07G044_SSI0_PCLK2 60 +#define R9A07G044_SSI0_PCLK_SFR 61 +#define R9A07G044_SSI1_PCLK2 62 +#define R9A07G044_SSI1_PCLK_SFR 63 +#define R9A07G044_SSI2_PCLK2 64 +#define R9A07G044_SSI2_PCLK_SFR 65 +#define R9A07G044_SSI3_PCLK2 66 +#define R9A07G044_SSI3_PCLK_SFR 67 +#define R9A07G044_SRC_CLKP 68 +#define R9A07G044_USB_U2H0_HCLK 69 +#define R9A07G044_USB_U2H1_HCLK 70 +#define R9A07G044_USB_U2P_EXR_CPUCLK 71 +#define R9A07G044_USB_PCLK 72 +#define R9A07G044_ETH0_CLK_AXI 73 +#define R9A07G044_ETH0_CLK_CHI 74 +#define R9A07G044_ETH1_CLK_AXI 75 +#define R9A07G044_ETH1_CLK_CHI 76 +#define R9A07G044_I2C0_PCLK 77 +#define 
R9A07G044_I2C1_PCLK 78 +#define R9A07G044_I2C2_PCLK 79 +#define R9A07G044_I2C3_PCLK 80 +#define R9A07G044_SCIF0_CLK_PCK 81 +#define R9A07G044_SCIF1_CLK_PCK 82 +#define R9A07G044_SCIF2_CLK_PCK 83 +#define R9A07G044_SCIF3_CLK_PCK 84 +#define R9A07G044_SCIF4_CLK_PCK 85 +#define R9A07G044_SCI0_CLKP 86 +#define R9A07G044_SCI1_CLKP 87 +#define R9A07G044_IRDA_CLKP 88 +#define R9A07G044_RSPI0_CLKB 89 +#define R9A07G044_RSPI1_CLKB 90 +#define R9A07G044_RSPI2_CLKB 91 +#define R9A07G044_CANFD_PCLK 92 +#define R9A07G044_GPIO_HCLK 93 +#define R9A07G044_ADC_ADCLK 94 +#define R9A07G044_ADC_PCLK 95 +#define R9A07G044_TSU_PCLK 96 + +/* R9A07G044 Resets */ +#define R9A07G044_CA55_RST_1_0 0 +#define R9A07G044_CA55_RST_1_1 1 +#define R9A07G044_CA55_RST_3_0 2 +#define R9A07G044_CA55_RST_3_1 3 +#define R9A07G044_CA55_RST_4 4 +#define R9A07G044_CA55_RST_5 5 +#define R9A07G044_CA55_RST_6 6 +#define R9A07G044_CA55_RST_7 7 +#define R9A07G044_CA55_RST_8 8 +#define R9A07G044_CA55_RST_9 9 +#define R9A07G044_CA55_RST_10 10 +#define R9A07G044_CA55_RST_11 11 +#define R9A07G044_CA55_RST_12 12 +#define R9A07G044_GIC600_GICRESET_N 13 +#define R9A07G044_GIC600_DBG_GICRESET_N 14 +#define R9A07G044_IA55_RESETN 15 +#define R9A07G044_MHU_RESETN 16 +#define R9A07G044_DMAC_ARESETN 17 +#define R9A07G044_DMAC_RST_ASYNC 18 +#define R9A07G044_SYC_RESETN 19 +#define R9A07G044_OSTM0_PRESETZ 20 +#define R9A07G044_OSTM1_PRESETZ 21 +#define R9A07G044_OSTM2_PRESETZ 22 +#define R9A07G044_MTU_X_PRESET_MTU3 23 +#define R9A07G044_POE3_RST_M_REG 24 +#define R9A07G044_GPT_RST_C 25 +#define R9A07G044_POEG_A_RST 26 +#define R9A07G044_POEG_B_RST 27 +#define R9A07G044_POEG_C_RST 28 +#define R9A07G044_POEG_D_RST 29 +#define R9A07G044_WDT0_PRESETN 30 +#define R9A07G044_WDT1_PRESETN 31 +#define R9A07G044_WDT2_PRESETN 32 +#define R9A07G044_SPI_RST 33 +#define R9A07G044_SDHI0_IXRST 34 +#define R9A07G044_SDHI1_IXRST 35 +#define R9A07G044_GPU_RESETN 36 +#define R9A07G044_GPU_AXI_RESETN 37 +#define R9A07G044_GPU_ACE_RESETN 38 +#define R9A07G044_ISU_ARESETN 39 +#define R9A07G044_ISU_PRESETN 40 +#define R9A07G044_H264_X_RESET_VCP 41 +#define R9A07G044_H264_CP_PRESET_P 42 +#define R9A07G044_CRU_CMN_RSTB 43 +#define R9A07G044_CRU_PRESETN 44 +#define R9A07G044_CRU_ARESETN 45 +#define R9A07G044_MIPI_DSI_CMN_RSTB 46 +#define R9A07G044_MIPI_DSI_ARESET_N 47 +#define R9A07G044_MIPI_DSI_PRESET_N 48 +#define R9A07G044_LCDC_RESET_N 49 +#define R9A07G044_SSI0_RST_M2_REG 50 +#define R9A07G044_SSI1_RST_M2_REG 51 +#define R9A07G044_SSI2_RST_M2_REG 52 +#define R9A07G044_SSI3_RST_M2_REG 53 +#define R9A07G044_SRC_RST 54 +#define R9A07G044_USB_U2H0_HRESETN 55 +#define R9A07G044_USB_U2H1_HRESETN 56 +#define R9A07G044_USB_U2P_EXL_SYSRST 57 +#define R9A07G044_USB_PRESETN 58 +#define R9A07G044_ETH0_RST_HW_N 59 +#define R9A07G044_ETH1_RST_HW_N 60 +#define R9A07G044_I2C0_MRST 61 +#define R9A07G044_I2C1_MRST 62 +#define R9A07G044_I2C2_MRST 63 +#define R9A07G044_I2C3_MRST 64 +#define R9A07G044_SCIF0_RST_SYSTEM_N 65 +#define R9A07G044_SCIF1_RST_SYSTEM_N 66 +#define R9A07G044_SCIF2_RST_SYSTEM_N 67 +#define R9A07G044_SCIF3_RST_SYSTEM_N 68 +#define R9A07G044_SCIF4_RST_SYSTEM_N 69 +#define R9A07G044_SCI0_RST 70 +#define R9A07G044_SCI1_RST 71 +#define R9A07G044_IRDA_RST 72 +#define R9A07G044_RSPI0_RST 73 +#define R9A07G044_RSPI1_RST 74 +#define R9A07G044_RSPI2_RST 75 +#define R9A07G044_CANFD_RSTP_N 76 +#define R9A07G044_CANFD_RSTC_N 77 +#define R9A07G044_GPIO_RSTN 78 +#define R9A07G044_GPIO_PORT_RESETN 79 +#define R9A07G044_GPIO_SPARE_RESETN 80 +#define R9A07G044_ADC_PRESETN 81 +#define 
R9A07G044_ADC_ADRST_N 82 +#define R9A07G044_TSU_PRESETN 83 + +#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */ diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h index 4cdaf13582..e02770b98e 100644 --- a/include/dt-bindings/clock/stm32mp1-clks.h +++ b/include/dt-bindings/clock/stm32mp1-clks.h @@ -248,4 +248,31 @@ #define STM32MP1_LAST_CLK 232 +/* SCMI clock identifiers */ +#define CK_SCMI0_HSE 0 +#define CK_SCMI0_HSI 1 +#define CK_SCMI0_CSI 2 +#define CK_SCMI0_LSE 3 +#define CK_SCMI0_LSI 4 +#define CK_SCMI0_PLL2_Q 5 +#define CK_SCMI0_PLL2_R 6 +#define CK_SCMI0_MPU 7 +#define CK_SCMI0_AXI 8 +#define CK_SCMI0_BSEC 9 +#define CK_SCMI0_CRYP1 10 +#define CK_SCMI0_GPIOZ 11 +#define CK_SCMI0_HASH1 12 +#define CK_SCMI0_I2C4 13 +#define CK_SCMI0_I2C6 14 +#define CK_SCMI0_IWDG1 15 +#define CK_SCMI0_RNG1 16 +#define CK_SCMI0_RTC 17 +#define CK_SCMI0_RTCAPB 18 +#define CK_SCMI0_SPI6 19 +#define CK_SCMI0_USART1 20 + +#define CK_SCMI1_PLL3_Q 0 +#define CK_SCMI1_PLL3_R 1 +#define CK_SCMI1_MCU 2 + #endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */ diff --git a/include/dt-bindings/interconnect/qcom,sc7280.h b/include/dt-bindings/interconnect/qcom,sc7280.h new file mode 100644 index 0000000000..21b0004439 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc7280.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Qualcomm SC7280 interconnect IDs + * + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H + +#define MASTER_QSPI_0 0 +#define MASTER_QUP_0 1 +#define MASTER_QUP_1 2 +#define MASTER_A1NOC_CFG 3 +#define MASTER_PCIE_0 4 +#define MASTER_PCIE_1 5 +#define MASTER_SDCC_1 6 +#define MASTER_SDCC_2 7 +#define MASTER_SDCC_4 8 +#define MASTER_UFS_MEM 9 +#define MASTER_USB2 10 +#define MASTER_USB3_0 11 +#define SLAVE_A1NOC_SNOC 12 +#define SLAVE_ANOC_PCIE_GEM_NOC 13 +#define SLAVE_SERVICE_A1NOC 14 + +#define MASTER_QDSS_BAM 0 +#define MASTER_A2NOC_CFG 1 +#define MASTER_CNOC_A2NOC 2 +#define MASTER_CRYPTO 3 +#define MASTER_IPA 4 +#define MASTER_QDSS_ETR 5 +#define SLAVE_A2NOC_SNOC 6 +#define SLAVE_SERVICE_A2NOC 7 + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define SLAVE_QUP_CORE_0 2 +#define SLAVE_QUP_CORE_1 3 + +#define MASTER_CNOC3_CNOC2 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_AHB2PHY_SOUTH 2 +#define SLAVE_AHB2PHY_NORTH 3 +#define SLAVE_CAMERA_CFG 4 +#define SLAVE_CLK_CTL 5 +#define SLAVE_CDSP_CFG 6 +#define SLAVE_RBCPR_CX_CFG 7 +#define SLAVE_RBCPR_MX_CFG 8 +#define SLAVE_CRYPTO_0_CFG 9 +#define SLAVE_CX_RDPM 10 +#define SLAVE_DCC_CFG 11 +#define SLAVE_DISPLAY_CFG 12 +#define SLAVE_GFX3D_CFG 13 +#define SLAVE_HWKM 14 +#define SLAVE_IMEM_CFG 15 +#define SLAVE_IPA_CFG 16 +#define SLAVE_IPC_ROUTER_CFG 17 +#define SLAVE_LPASS 18 +#define SLAVE_CNOC_MSS 19 +#define SLAVE_MX_RDPM 20 +#define SLAVE_PCIE_0_CFG 21 +#define SLAVE_PCIE_1_CFG 22 +#define SLAVE_PDM 23 +#define SLAVE_PIMEM_CFG 24 +#define SLAVE_PKA_WRAPPER_CFG 25 +#define SLAVE_PMU_WRAPPER_CFG 26 +#define SLAVE_QDSS_CFG 27 +#define SLAVE_QSPI_0 28 +#define SLAVE_QUP_0 29 +#define SLAVE_QUP_1 30 +#define SLAVE_SDCC_1 31 +#define SLAVE_SDCC_2 32 +#define SLAVE_SDCC_4 33 +#define SLAVE_SECURITY 34 +#define SLAVE_TCSR 35 +#define SLAVE_TLMM 36 +#define SLAVE_UFS_MEM_CFG 37 +#define SLAVE_USB2 38 +#define SLAVE_USB3_0 39 +#define SLAVE_VENUS_CFG 40 +#define SLAVE_VSENSE_CTRL_CFG 41 +#define SLAVE_A1NOC_CFG 42 +#define SLAVE_A2NOC_CFG 
43 +#define SLAVE_CNOC2_CNOC3 44 +#define SLAVE_CNOC_MNOC_CFG 45 +#define SLAVE_SNOC_CFG 46 + +#define MASTER_CNOC2_CNOC3 0 +#define MASTER_GEM_NOC_CNOC 1 +#define MASTER_GEM_NOC_PCIE_SNOC 2 +#define SLAVE_AOSS 3 +#define SLAVE_APPSS 4 +#define SLAVE_CNOC3_CNOC2 5 +#define SLAVE_CNOC_A2NOC 6 +#define SLAVE_DDRSS_CFG 7 +#define SLAVE_BOOT_IMEM 8 +#define SLAVE_IMEM 9 +#define SLAVE_PIMEM 10 +#define SLAVE_PCIE_0 11 +#define SLAVE_PCIE_1 12 +#define SLAVE_QDSS_STM 13 +#define SLAVE_TCU 14 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_GEM_NOC_CFG 2 + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_APPSS_PROC 2 +#define MASTER_COMPUTE_NOC 3 +#define MASTER_GEM_NOC_CFG 4 +#define MASTER_GFX3D 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_ANOC_PCIE_GEM_NOC 8 +#define MASTER_SNOC_GC_MEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define SLAVE_MSS_PROC_MS_MPU_CFG 11 +#define SLAVE_MCDMA_MS_MPU_CFG 12 +#define SLAVE_GEM_NOC_CNOC 13 +#define SLAVE_LLCC 14 +#define SLAVE_MEM_NOC_PCIE_SNOC 15 +#define SLAVE_SERVICE_GEM_NOC_1 16 +#define SLAVE_SERVICE_GEM_NOC_2 17 +#define SLAVE_SERVICE_GEM_NOC 18 + +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define SLAVE_LPASS_CORE_CFG 1 +#define SLAVE_LPASS_LPI_CFG 2 +#define SLAVE_LPASS_MPU_CFG 3 +#define SLAVE_LPASS_TOP_CFG 4 +#define SLAVE_SERVICES_LPASS_AML_NOC 5 +#define SLAVE_SERVICE_LPASS_AG_NOC 6 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CNOC_MNOC_CFG 0 +#define MASTER_VIDEO_P0 1 +#define MASTER_VIDEO_PROC 2 +#define MASTER_CAMNOC_HF 3 +#define MASTER_CAMNOC_ICP 4 +#define MASTER_CAMNOC_SF 5 +#define MASTER_MDP0 6 +#define SLAVE_MNOC_HF_MEM_NOC 7 +#define SLAVE_MNOC_SF_MEM_NOC 8 +#define SLAVE_SERVICE_MNOC 9 + +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_CDSP_MEM_NOC 2 +#define SLAVE_SERVICE_NSP_NOC 3 + +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define MASTER_SNOC_CFG 2 +#define MASTER_PIMEM 3 +#define MASTER_GIC 4 +#define SLAVE_SNOC_GEM_NOC_GC 5 +#define SLAVE_SNOC_GEM_NOC_SF 6 +#define SLAVE_SERVICE_SNOC 7 + +#endif diff --git a/include/dt-bindings/leds/rt4831-backlight.h b/include/dt-bindings/leds/rt4831-backlight.h new file mode 100644 index 0000000000..125c6351bb --- /dev/null +++ b/include/dt-bindings/leds/rt4831-backlight.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for rt4831 backlight bindings. + * + * Copyright (C) 2020, Richtek Technology Corp. 
+ * Author: ChiYuan Huang + */ + +#ifndef _DT_BINDINGS_RT4831_BACKLIGHT_H +#define _DT_BINDINGS_RT4831_BACKLIGHT_H + +#define RT4831_BLOVPLVL_17V 0 +#define RT4831_BLOVPLVL_21V 1 +#define RT4831_BLOVPLVL_25V 2 +#define RT4831_BLOVPLVL_29V 3 + +#define RT4831_BLED_CH1EN (1 << 0) +#define RT4831_BLED_CH2EN (1 << 1) +#define RT4831_BLED_CH3EN (1 << 2) +#define RT4831_BLED_CH4EN (1 << 3) +#define RT4831_BLED_ALLCHEN ((1 << 4) - 1) + +#endif /* _DT_BINDINGS_RT4831_BACKLIGHT_H */ diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h index 4c23eefed5..eb91a6c05b 100644 --- a/include/dt-bindings/mailbox/qcom-ipcc.h +++ b/include/dt-bindings/mailbox/qcom-ipcc.h @@ -29,5 +29,6 @@ #define IPCC_CLIENT_PCIE1 14 #define IPCC_CLIENT_PCIE2 15 #define IPCC_CLIENT_SPSS 16 +#define IPCC_CLIENT_WPSS 24 #endif diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h new file mode 100644 index 0000000000..eca9448df2 --- /dev/null +++ b/include/dt-bindings/mfd/qcom-pm8008.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H +#define __DT_BINDINGS_MFD_QCOM_PM8008_H + +/* PM8008 IRQ numbers */ +#define PM8008_IRQ_MISC_UVLO 0 +#define PM8008_IRQ_MISC_OVLO 1 +#define PM8008_IRQ_MISC_OTST2 2 +#define PM8008_IRQ_MISC_OTST3 3 +#define PM8008_IRQ_MISC_LDO_OCP 4 +#define PM8008_IRQ_TEMP_ALARM 5 +#define PM8008_IRQ_GPIO1 6 +#define PM8008_IRQ_GPIO2 7 + +#endif diff --git a/include/dt-bindings/pinctrl/apple.h b/include/dt-bindings/pinctrl/apple.h new file mode 100644 index 0000000000..ea0a6f4665 --- /dev/null +++ b/include/dt-bindings/pinctrl/apple.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR MIT */ +/* + * This header provides constants for Apple pinctrl bindings. + */ + +#ifndef _DT_BINDINGS_PINCTRL_APPLE_H +#define _DT_BINDINGS_PINCTRL_APPLE_H + +#define APPLE_PINMUX(pin, func) ((pin) | ((func) << 16)) +#define APPLE_PIN(pinmux) ((pinmux) & 0xffff) +#define APPLE_FUNC(pinmux) ((pinmux) >> 16) + +#endif /* _DT_BINDINGS_PINCTRL_APPLE_H */ diff --git a/include/dt-bindings/pinctrl/mt8365-pinfunc.h b/include/dt-bindings/pinctrl/mt8365-pinfunc.h new file mode 100644 index 0000000000..e2ec8af57d --- /dev/null +++ b/include/dt-bindings/pinctrl/mt8365-pinfunc.h @@ -0,0 +1,858 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 MediaTek Inc. 
+ */ +#ifndef __MT8365_PINFUNC_H +#define __MT8365_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define MT8365_PIN_0_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT8365_PIN_0_GPIO0__FUNC_DPI_D0 (MTK_PIN_NO(0) | 1) +#define MT8365_PIN_0_GPIO0__FUNC_PWM_A (MTK_PIN_NO(0) | 2) +#define MT8365_PIN_0_GPIO0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3) +#define MT8365_PIN_0_GPIO0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4) +#define MT8365_PIN_0_GPIO0__FUNC_CONN_MCU_TDO (MTK_PIN_NO(0) | 5) +#define MT8365_PIN_0_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7) + +#define MT8365_PIN_1_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT8365_PIN_1_GPIO1__FUNC_DPI_D1 (MTK_PIN_NO(1) | 1) +#define MT8365_PIN_1_GPIO1__FUNC_PWM_B (MTK_PIN_NO(1) | 2) +#define MT8365_PIN_1_GPIO1__FUNC_I2S2_LRCK (MTK_PIN_NO(1) | 3) +#define MT8365_PIN_1_GPIO1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4) +#define MT8365_PIN_1_GPIO1__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(1) | 5) +#define MT8365_PIN_1_GPIO1__FUNC_DBG_MON_A1 (MTK_PIN_NO(1) | 7) + +#define MT8365_PIN_2_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT8365_PIN_2_GPIO2__FUNC_DPI_D2 (MTK_PIN_NO(2) | 1) +#define MT8365_PIN_2_GPIO2__FUNC_PWM_C (MTK_PIN_NO(2) | 2) +#define MT8365_PIN_2_GPIO2__FUNC_I2S2_MCK (MTK_PIN_NO(2) | 3) +#define MT8365_PIN_2_GPIO2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4) +#define MT8365_PIN_2_GPIO2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(2) | 5) +#define MT8365_PIN_2_GPIO2__FUNC_DBG_MON_A2 (MTK_PIN_NO(2) | 7) + +#define MT8365_PIN_3_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT8365_PIN_3_GPIO3__FUNC_DPI_D3 (MTK_PIN_NO(3) | 1) +#define MT8365_PIN_3_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 2) +#define MT8365_PIN_3_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3) +#define MT8365_PIN_3_GPIO3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4) +#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_TCK (MTK_PIN_NO(3) | 5) +#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(3) | 6) +#define MT8365_PIN_3_GPIO3__FUNC_DBG_MON_A3 (MTK_PIN_NO(3) | 7) + +#define MT8365_PIN_4_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT8365_PIN_4_GPIO4__FUNC_DPI_D4 (MTK_PIN_NO(4) | 1) +#define MT8365_PIN_4_GPIO4__FUNC_CLKM1 (MTK_PIN_NO(4) | 2) +#define MT8365_PIN_4_GPIO4__FUNC_I2S1_BCK (MTK_PIN_NO(4) | 3) +#define MT8365_PIN_4_GPIO4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4) +#define MT8365_PIN_4_GPIO4__FUNC_CONN_MCU_TDI (MTK_PIN_NO(4) | 5) +#define MT8365_PIN_4_GPIO4__FUNC_VDEC_TEST_CK (MTK_PIN_NO(4) | 6) +#define MT8365_PIN_4_GPIO4__FUNC_DBG_MON_A4 (MTK_PIN_NO(4) | 7) + +#define MT8365_PIN_5_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT8365_PIN_5_GPIO5__FUNC_DPI_D5 (MTK_PIN_NO(5) | 1) +#define MT8365_PIN_5_GPIO5__FUNC_CLKM2 (MTK_PIN_NO(5) | 2) +#define MT8365_PIN_5_GPIO5__FUNC_I2S1_LRCK (MTK_PIN_NO(5) | 3) +#define MT8365_PIN_5_GPIO5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4) +#define MT8365_PIN_5_GPIO5__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(5) | 5) +#define MT8365_PIN_5_GPIO5__FUNC_MM_TEST_CK (MTK_PIN_NO(5) | 6) +#define MT8365_PIN_5_GPIO5__FUNC_DBG_MON_A5 (MTK_PIN_NO(5) | 7) + +#define MT8365_PIN_6_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT8365_PIN_6_GPIO6__FUNC_DPI_D6 (MTK_PIN_NO(6) | 1) +#define MT8365_PIN_6_GPIO6__FUNC_CLKM3 (MTK_PIN_NO(6) | 2) +#define MT8365_PIN_6_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3) +#define MT8365_PIN_6_GPIO6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4) +#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_TMS (MTK_PIN_NO(6) | 5) +#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(6) | 6) +#define MT8365_PIN_6_GPIO6__FUNC_DBG_MON_A6 (MTK_PIN_NO(6) | 7) + +#define MT8365_PIN_7_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT8365_PIN_7_GPIO7__FUNC_DPI_D7 
(MTK_PIN_NO(7) | 1) +#define MT8365_PIN_7_GPIO7__FUNC_I2S1_DO (MTK_PIN_NO(7) | 3) +#define MT8365_PIN_7_GPIO7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4) +#define MT8365_PIN_7_GPIO7__FUNC_CONN_DSP_JCK (MTK_PIN_NO(7) | 5) +#define MT8365_PIN_7_GPIO7__FUNC_DBG_MON_A7 (MTK_PIN_NO(7) | 7) + +#define MT8365_PIN_8_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT8365_PIN_8_GPIO8__FUNC_DPI_D8 (MTK_PIN_NO(8) | 1) +#define MT8365_PIN_8_GPIO8__FUNC_SPI_CLK (MTK_PIN_NO(8) | 2) +#define MT8365_PIN_8_GPIO8__FUNC_I2S0_BCK (MTK_PIN_NO(8) | 3) +#define MT8365_PIN_8_GPIO8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4) +#define MT8365_PIN_8_GPIO8__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(8) | 5) +#define MT8365_PIN_8_GPIO8__FUNC_DBG_MON_A8 (MTK_PIN_NO(8) | 7) + +#define MT8365_PIN_9_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT8365_PIN_9_GPIO9__FUNC_DPI_D9 (MTK_PIN_NO(9) | 1) +#define MT8365_PIN_9_GPIO9__FUNC_SPI_CSB (MTK_PIN_NO(9) | 2) +#define MT8365_PIN_9_GPIO9__FUNC_I2S0_LRCK (MTK_PIN_NO(9) | 3) +#define MT8365_PIN_9_GPIO9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4) +#define MT8365_PIN_9_GPIO9__FUNC_CONN_DSP_JDI (MTK_PIN_NO(9) | 5) +#define MT8365_PIN_9_GPIO9__FUNC_DBG_MON_A9 (MTK_PIN_NO(9) | 7) + +#define MT8365_PIN_10_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT8365_PIN_10_GPIO10__FUNC_DPI_D10 (MTK_PIN_NO(10) | 1) +#define MT8365_PIN_10_GPIO10__FUNC_SPI_MI (MTK_PIN_NO(10) | 2) +#define MT8365_PIN_10_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 3) +#define MT8365_PIN_10_GPIO10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4) +#define MT8365_PIN_10_GPIO10__FUNC_CONN_DSP_JMS (MTK_PIN_NO(10) | 5) +#define MT8365_PIN_10_GPIO10__FUNC_DBG_MON_A10 (MTK_PIN_NO(10) | 7) + +#define MT8365_PIN_11_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT8365_PIN_11_GPIO11__FUNC_DPI_D11 (MTK_PIN_NO(11) | 1) +#define MT8365_PIN_11_GPIO11__FUNC_SPI_MO (MTK_PIN_NO(11) | 2) +#define MT8365_PIN_11_GPIO11__FUNC_I2S0_DI (MTK_PIN_NO(11) | 3) +#define MT8365_PIN_11_GPIO11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 4) +#define MT8365_PIN_11_GPIO11__FUNC_CONN_DSP_JDO (MTK_PIN_NO(11) | 5) +#define MT8365_PIN_11_GPIO11__FUNC_DBG_MON_A11 (MTK_PIN_NO(11) | 7) + +#define MT8365_PIN_12_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT8365_PIN_12_GPIO12__FUNC_DPI_DE (MTK_PIN_NO(12) | 1) +#define MT8365_PIN_12_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 2) +#define MT8365_PIN_12_GPIO12__FUNC_I2S3_BCK (MTK_PIN_NO(12) | 3) +#define MT8365_PIN_12_GPIO12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 4) +#define MT8365_PIN_12_GPIO12__FUNC_O_WIFI_TXD (MTK_PIN_NO(12) | 5) +#define MT8365_PIN_12_GPIO12__FUNC_DBG_MON_A12 (MTK_PIN_NO(12) | 7) + +#define MT8365_PIN_13_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT8365_PIN_13_GPIO13__FUNC_DPI_VSYNC (MTK_PIN_NO(13) | 1) +#define MT8365_PIN_13_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 2) +#define MT8365_PIN_13_GPIO13__FUNC_I2S3_LRCK (MTK_PIN_NO(13) | 3) +#define MT8365_PIN_13_GPIO13__FUNC_EXT_COL (MTK_PIN_NO(13) | 4) +#define MT8365_PIN_13_GPIO13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 5) +#define MT8365_PIN_13_GPIO13__FUNC_DBG_MON_A13 (MTK_PIN_NO(13) | 7) + +#define MT8365_PIN_14_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT8365_PIN_14_GPIO14__FUNC_DPI_CK (MTK_PIN_NO(14) | 1) +#define MT8365_PIN_14_GPIO14__FUNC_UCTS2 (MTK_PIN_NO(14) | 2) +#define MT8365_PIN_14_GPIO14__FUNC_I2S3_MCK (MTK_PIN_NO(14) | 3) +#define MT8365_PIN_14_GPIO14__FUNC_EXT_MDIO (MTK_PIN_NO(14) | 4) +#define MT8365_PIN_14_GPIO14__FUNC_SPDIF_OUT (MTK_PIN_NO(14) | 5) +#define MT8365_PIN_14_GPIO14__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(14) | 6) +#define MT8365_PIN_14_GPIO14__FUNC_DBG_MON_A14 (MTK_PIN_NO(14) | 7) 
+ +#define MT8365_PIN_15_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT8365_PIN_15_GPIO15__FUNC_DPI_HSYNC (MTK_PIN_NO(15) | 1) +#define MT8365_PIN_15_GPIO15__FUNC_URTS2 (MTK_PIN_NO(15) | 2) +#define MT8365_PIN_15_GPIO15__FUNC_I2S3_DO (MTK_PIN_NO(15) | 3) +#define MT8365_PIN_15_GPIO15__FUNC_EXT_MDC (MTK_PIN_NO(15) | 4) +#define MT8365_PIN_15_GPIO15__FUNC_IRRX (MTK_PIN_NO(15) | 5) +#define MT8365_PIN_15_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 6) +#define MT8365_PIN_15_GPIO15__FUNC_DBG_MON_A15 (MTK_PIN_NO(15) | 7) + +#define MT8365_PIN_16_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT8365_PIN_16_GPIO16__FUNC_DPI_D12 (MTK_PIN_NO(16) | 1) +#define MT8365_PIN_16_GPIO16__FUNC_USB_DRVVBUS (MTK_PIN_NO(16) | 2) +#define MT8365_PIN_16_GPIO16__FUNC_PWM_A (MTK_PIN_NO(16) | 3) +#define MT8365_PIN_16_GPIO16__FUNC_CLKM0 (MTK_PIN_NO(16) | 4) +#define MT8365_PIN_16_GPIO16__FUNC_ANT_SEL0 (MTK_PIN_NO(16) | 5) +#define MT8365_PIN_16_GPIO16__FUNC_TSF_IN (MTK_PIN_NO(16) | 6) +#define MT8365_PIN_16_GPIO16__FUNC_DBG_MON_A16 (MTK_PIN_NO(16) | 7) + +#define MT8365_PIN_17_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT8365_PIN_17_GPIO17__FUNC_DPI_D13 (MTK_PIN_NO(17) | 1) +#define MT8365_PIN_17_GPIO17__FUNC_IDDIG (MTK_PIN_NO(17) | 2) +#define MT8365_PIN_17_GPIO17__FUNC_PWM_B (MTK_PIN_NO(17) | 3) +#define MT8365_PIN_17_GPIO17__FUNC_CLKM1 (MTK_PIN_NO(17) | 4) +#define MT8365_PIN_17_GPIO17__FUNC_ANT_SEL1 (MTK_PIN_NO(17) | 5) +#define MT8365_PIN_17_GPIO17__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 6) +#define MT8365_PIN_17_GPIO17__FUNC_DBG_MON_A17 (MTK_PIN_NO(17) | 7) + +#define MT8365_PIN_18_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT8365_PIN_18_GPIO18__FUNC_DPI_D14 (MTK_PIN_NO(18) | 1) +#define MT8365_PIN_18_GPIO18__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(18) | 2) +#define MT8365_PIN_18_GPIO18__FUNC_PWM_C (MTK_PIN_NO(18) | 3) +#define MT8365_PIN_18_GPIO18__FUNC_CLKM2 (MTK_PIN_NO(18) | 4) +#define MT8365_PIN_18_GPIO18__FUNC_ANT_SEL2 (MTK_PIN_NO(18) | 5) +#define MT8365_PIN_18_GPIO18__FUNC_MFG_TEST_CK (MTK_PIN_NO(18) | 6) +#define MT8365_PIN_18_GPIO18__FUNC_DBG_MON_A18 (MTK_PIN_NO(18) | 7) + +#define MT8365_PIN_19_DISP_PWM__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT8365_PIN_19_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(19) | 1) +#define MT8365_PIN_19_DISP_PWM__FUNC_PWM_A (MTK_PIN_NO(19) | 2) +#define MT8365_PIN_19_DISP_PWM__FUNC_DBG_MON_A19 (MTK_PIN_NO(19) | 7) + +#define MT8365_PIN_20_LCM_RST__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT8365_PIN_20_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(20) | 1) +#define MT8365_PIN_20_LCM_RST__FUNC_PWM_B (MTK_PIN_NO(20) | 2) +#define MT8365_PIN_20_LCM_RST__FUNC_DBG_MON_A20 (MTK_PIN_NO(20) | 7) + +#define MT8365_PIN_21_DSI_TE__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT8365_PIN_21_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(21) | 1) +#define MT8365_PIN_21_DSI_TE__FUNC_PWM_C (MTK_PIN_NO(21) | 2) +#define MT8365_PIN_21_DSI_TE__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 3) +#define MT8365_PIN_21_DSI_TE__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(21) | 4) +#define MT8365_PIN_21_DSI_TE__FUNC_DBG_MON_A21 (MTK_PIN_NO(21) | 7) + +#define MT8365_PIN_22_KPROW0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT8365_PIN_22_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(22) | 1) +#define MT8365_PIN_22_KPROW0__FUNC_DBG_MON_A22 (MTK_PIN_NO(22) | 7) + +#define MT8365_PIN_23_KPROW1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT8365_PIN_23_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(23) | 1) +#define MT8365_PIN_23_KPROW1__FUNC_IDDIG (MTK_PIN_NO(23) | 2) +#define MT8365_PIN_23_KPROW1__FUNC_WIFI_TXD (MTK_PIN_NO(23) | 3) +#define MT8365_PIN_23_KPROW1__FUNC_CLKM3 
(MTK_PIN_NO(23) | 4) +#define MT8365_PIN_23_KPROW1__FUNC_ANT_SEL1 (MTK_PIN_NO(23) | 5) +#define MT8365_PIN_23_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 6) +#define MT8365_PIN_23_KPROW1__FUNC_DBG_MON_B0 (MTK_PIN_NO(23) | 7) + +#define MT8365_PIN_24_KPCOL0__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT8365_PIN_24_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(24) | 1) +#define MT8365_PIN_24_KPCOL0__FUNC_DBG_MON_A23 (MTK_PIN_NO(24) | 7) + +#define MT8365_PIN_25_KPCOL1__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT8365_PIN_25_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(25) | 1) +#define MT8365_PIN_25_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(25) | 2) +#define MT8365_PIN_25_KPCOL1__FUNC_APU_JTAG_TRST (MTK_PIN_NO(25) | 3) +#define MT8365_PIN_25_KPCOL1__FUNC_UDI_NTRST_XI (MTK_PIN_NO(25) | 4) +#define MT8365_PIN_25_KPCOL1__FUNC_DFD_NTRST_XI (MTK_PIN_NO(25) | 5) +#define MT8365_PIN_25_KPCOL1__FUNC_CONN_TEST_CK (MTK_PIN_NO(25) | 6) +#define MT8365_PIN_25_KPCOL1__FUNC_DBG_MON_B1 (MTK_PIN_NO(25) | 7) + +#define MT8365_PIN_26_SPI_CS__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT8365_PIN_26_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(26) | 1) +#define MT8365_PIN_26_SPI_CS__FUNC_APU_JTAG_TMS (MTK_PIN_NO(26) | 3) +#define MT8365_PIN_26_SPI_CS__FUNC_UDI_TMS_XI (MTK_PIN_NO(26) | 4) +#define MT8365_PIN_26_SPI_CS__FUNC_DFD_TMS_XI (MTK_PIN_NO(26) | 5) +#define MT8365_PIN_26_SPI_CS__FUNC_CONN_TEST_CK (MTK_PIN_NO(26) | 6) +#define MT8365_PIN_26_SPI_CS__FUNC_DBG_MON_A24 (MTK_PIN_NO(26) | 7) + +#define MT8365_PIN_27_SPI_CK__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT8365_PIN_27_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(27) | 1) +#define MT8365_PIN_27_SPI_CK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(27) | 3) +#define MT8365_PIN_27_SPI_CK__FUNC_UDI_TCK_XI (MTK_PIN_NO(27) | 4) +#define MT8365_PIN_27_SPI_CK__FUNC_DFD_TCK_XI (MTK_PIN_NO(27) | 5) +#define MT8365_PIN_27_SPI_CK__FUNC_APU_TEST_CK (MTK_PIN_NO(27) | 6) +#define MT8365_PIN_27_SPI_CK__FUNC_DBG_MON_A25 (MTK_PIN_NO(27) | 7) + +#define MT8365_PIN_28_SPI_MI__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(28) | 1) +#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(28) | 2) +#define MT8365_PIN_28_SPI_MI__FUNC_APU_JTAG_TDI (MTK_PIN_NO(28) | 3) +#define MT8365_PIN_28_SPI_MI__FUNC_UDI_TDI_XI (MTK_PIN_NO(28) | 4) +#define MT8365_PIN_28_SPI_MI__FUNC_DFD_TDI_XI (MTK_PIN_NO(28) | 5) +#define MT8365_PIN_28_SPI_MI__FUNC_DSP_TEST_CK (MTK_PIN_NO(28) | 6) +#define MT8365_PIN_28_SPI_MI__FUNC_DBG_MON_A26 (MTK_PIN_NO(28) | 7) + +#define MT8365_PIN_29_SPI_MO__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(29) | 1) +#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(29) | 2) +#define MT8365_PIN_29_SPI_MO__FUNC_APU_JTAG_TDO (MTK_PIN_NO(29) | 3) +#define MT8365_PIN_29_SPI_MO__FUNC_UDI_TDO (MTK_PIN_NO(29) | 4) +#define MT8365_PIN_29_SPI_MO__FUNC_DFD_TDO (MTK_PIN_NO(29) | 5) +#define MT8365_PIN_29_SPI_MO__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(29) | 6) +#define MT8365_PIN_29_SPI_MO__FUNC_DBG_MON_A27 (MTK_PIN_NO(29) | 7) + +#define MT8365_PIN_30_JTMS__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT8365_PIN_30_JTMS__FUNC_JTMS (MTK_PIN_NO(30) | 1) +#define MT8365_PIN_30_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(30) | 2) +#define MT8365_PIN_30_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(30) | 3) +#define MT8365_PIN_30_JTMS__FUNC_MCU_SPM_TMS (MTK_PIN_NO(30) | 4) +#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(30) | 5) +#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 6) + +#define MT8365_PIN_31_JTCK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define 
MT8365_PIN_31_JTCK__FUNC_JTCK (MTK_PIN_NO(31) | 1) +#define MT8365_PIN_31_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(31) | 2) +#define MT8365_PIN_31_JTCK__FUNC_UDI_TCK_XI (MTK_PIN_NO(31) | 3) +#define MT8365_PIN_31_JTCK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(31) | 4) +#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(31) | 5) +#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(31) | 6) + +#define MT8365_PIN_32_JTDI__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT8365_PIN_32_JTDI__FUNC_JTDI (MTK_PIN_NO(32) | 1) +#define MT8365_PIN_32_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(32) | 2) +#define MT8365_PIN_32_JTDI__FUNC_UDI_TDI_XI (MTK_PIN_NO(32) | 3) +#define MT8365_PIN_32_JTDI__FUNC_MCU_SPM_TDI (MTK_PIN_NO(32) | 4) +#define MT8365_PIN_32_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 5) + +#define MT8365_PIN_33_JTDO__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT8365_PIN_33_JTDO__FUNC_JTDO (MTK_PIN_NO(33) | 1) +#define MT8365_PIN_33_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(33) | 2) +#define MT8365_PIN_33_JTDO__FUNC_UDI_TDO (MTK_PIN_NO(33) | 3) +#define MT8365_PIN_33_JTDO__FUNC_MCU_SPM_TDO (MTK_PIN_NO(33) | 4) +#define MT8365_PIN_33_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(33) | 5) + +#define MT8365_PIN_34_JTRST__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT8365_PIN_34_JTRST__FUNC_JTRST (MTK_PIN_NO(34) | 1) +#define MT8365_PIN_34_JTRST__FUNC_DFD_NTRST_XI (MTK_PIN_NO(34) | 2) +#define MT8365_PIN_34_JTRST__FUNC_UDI_NTRST_XI (MTK_PIN_NO(34) | 3) +#define MT8365_PIN_34_JTRST__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(34) | 4) +#define MT8365_PIN_34_JTRST__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5) + +#define MT8365_PIN_35_URXD0__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT8365_PIN_35_URXD0__FUNC_URXD0 (MTK_PIN_NO(35) | 1) +#define MT8365_PIN_35_URXD0__FUNC_UTXD0 (MTK_PIN_NO(35) | 2) +#define MT8365_PIN_35_URXD0__FUNC_DSP_URXD0 (MTK_PIN_NO(35) | 7) + +#define MT8365_PIN_36_UTXD0__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT8365_PIN_36_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(36) | 1) +#define MT8365_PIN_36_UTXD0__FUNC_URXD0 (MTK_PIN_NO(36) | 2) +#define MT8365_PIN_36_UTXD0__FUNC_DSP_UTXD0 (MTK_PIN_NO(36) | 7) + +#define MT8365_PIN_37_URXD1__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT8365_PIN_37_URXD1__FUNC_URXD1 (MTK_PIN_NO(37) | 1) +#define MT8365_PIN_37_URXD1__FUNC_UTXD1 (MTK_PIN_NO(37) | 2) +#define MT8365_PIN_37_URXD1__FUNC_UCTS2 (MTK_PIN_NO(37) | 3) +#define MT8365_PIN_37_URXD1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(37) | 4) +#define MT8365_PIN_37_URXD1__FUNC_CONN_UART0_RXD (MTK_PIN_NO(37) | 5) +#define MT8365_PIN_37_URXD1__FUNC_I2S0_MCK (MTK_PIN_NO(37) | 6) +#define MT8365_PIN_37_URXD1__FUNC_DSP_URXD0 (MTK_PIN_NO(37) | 7) + +#define MT8365_PIN_38_UTXD1__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define MT8365_PIN_38_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(38) | 1) +#define MT8365_PIN_38_UTXD1__FUNC_URXD1 (MTK_PIN_NO(38) | 2) +#define MT8365_PIN_38_UTXD1__FUNC_URTS2 (MTK_PIN_NO(38) | 3) +#define MT8365_PIN_38_UTXD1__FUNC_ANT_SEL2 (MTK_PIN_NO(38) | 4) +#define MT8365_PIN_38_UTXD1__FUNC_CONN_UART0_TXD (MTK_PIN_NO(38) | 5) +#define MT8365_PIN_38_UTXD1__FUNC_I2S1_MCK (MTK_PIN_NO(38) | 6) +#define MT8365_PIN_38_UTXD1__FUNC_DSP_UTXD0 (MTK_PIN_NO(38) | 7) + +#define MT8365_PIN_39_URXD2__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT8365_PIN_39_URXD2__FUNC_URXD2 (MTK_PIN_NO(39) | 1) +#define MT8365_PIN_39_URXD2__FUNC_UTXD2 (MTK_PIN_NO(39) | 2) +#define MT8365_PIN_39_URXD2__FUNC_UCTS1 (MTK_PIN_NO(39) | 3) +#define MT8365_PIN_39_URXD2__FUNC_IDDIG (MTK_PIN_NO(39) | 4) +#define MT8365_PIN_39_URXD2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(39) | 5) +#define 
MT8365_PIN_39_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(39) | 6) +#define MT8365_PIN_39_URXD2__FUNC_DSP_URXD0 (MTK_PIN_NO(39) | 7) + +#define MT8365_PIN_40_UTXD2__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT8365_PIN_40_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(40) | 1) +#define MT8365_PIN_40_UTXD2__FUNC_URXD2 (MTK_PIN_NO(40) | 2) +#define MT8365_PIN_40_UTXD2__FUNC_URTS1 (MTK_PIN_NO(40) | 3) +#define MT8365_PIN_40_UTXD2__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 4) +#define MT8365_PIN_40_UTXD2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(40) | 5) +#define MT8365_PIN_40_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(40) | 6) +#define MT8365_PIN_40_UTXD2__FUNC_DSP_UTXD0 (MTK_PIN_NO(40) | 7) + +#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(41) | 1) +#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(41) | 2) + +#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1) +#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2) + +#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(43) | 1) + +#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(44) | 1) + +#define MT8365_PIN_45_RTC32K_CK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT8365_PIN_45_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(45) | 1) + +#define MT8365_PIN_46_WATCHDOG__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT8365_PIN_46_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(46) | 1) + +#define MT8365_PIN_47_SRCLKENA0__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA0 (MTK_PIN_NO(47) | 1) +#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA1 (MTK_PIN_NO(47) | 2) + +#define MT8365_PIN_48_SRCLKENA1__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT8365_PIN_48_SRCLKENA1__FUNC_SRCLKENA1 (MTK_PIN_NO(48) | 1) + +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(49) | 1) +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MISO (MTK_PIN_NO(49) | 2) +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_I2S1_MCK (MTK_PIN_NO(49) | 3) + +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(50) | 1) +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(50) | 2) +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_I2S1_BCK (MTK_PIN_NO(50) | 3) + +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(51) | 1) +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(51) | 2) +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_I2S1_LRCK (MTK_PIN_NO(51) | 3) + +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(52) | 1) +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(52) | 2) +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_I2S1_DO (MTK_PIN_NO(52) | 3) + +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MISO (MTK_PIN_NO(53) | 1) +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(53) | 2) +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_I2S2_MCK (MTK_PIN_NO(53) | 3) + +#define 
MT8365_PIN_54_AUD_SYNC_MISO__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(54) | 1) +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(54) | 2) +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_I2S2_BCK (MTK_PIN_NO(54) | 3) + +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(55) | 1) +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(55) | 2) +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_I2S2_LRCK (MTK_PIN_NO(55) | 3) + +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(56) | 1) +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(56) | 2) +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_I2S2_DI (MTK_PIN_NO(56) | 3) + +#define MT8365_PIN_57_SDA0__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT8365_PIN_57_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(57) | 1) + +#define MT8365_PIN_58_SCL0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT8365_PIN_58_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(58) | 1) + +#define MT8365_PIN_59_SDA1__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define MT8365_PIN_59_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(59) | 1) +#define MT8365_PIN_59_SDA1__FUNC_USB_SDA (MTK_PIN_NO(59) | 6) +#define MT8365_PIN_59_SDA1__FUNC_DBG_SDA (MTK_PIN_NO(59) | 7) + +#define MT8365_PIN_60_SCL1__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT8365_PIN_60_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(60) | 1) +#define MT8365_PIN_60_SCL1__FUNC_USB_SCL (MTK_PIN_NO(60) | 6) +#define MT8365_PIN_60_SCL1__FUNC_DBG_SCL (MTK_PIN_NO(60) | 7) + +#define MT8365_PIN_61_SDA2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT8365_PIN_61_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(61) | 1) + +#define MT8365_PIN_62_SCL2__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT8365_PIN_62_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(62) | 1) + +#define MT8365_PIN_63_SDA3__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT8365_PIN_63_SDA3__FUNC_SDA3_0 (MTK_PIN_NO(63) | 1) + +#define MT8365_PIN_64_SCL3__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT8365_PIN_64_SCL3__FUNC_SCL3_0 (MTK_PIN_NO(64) | 1) + +#define MT8365_PIN_65_CMMCLK0__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK0 (MTK_PIN_NO(65) | 1) +#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK1 (MTK_PIN_NO(65) | 2) +#define MT8365_PIN_65_CMMCLK0__FUNC_DBG_MON_A28 (MTK_PIN_NO(65) | 7) + +#define MT8365_PIN_66_CMMCLK1__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK1 (MTK_PIN_NO(66) | 1) +#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK0 (MTK_PIN_NO(66) | 2) +#define MT8365_PIN_66_CMMCLK1__FUNC_DBG_MON_B2 (MTK_PIN_NO(66) | 7) + +#define MT8365_PIN_67_CMPCLK__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT8365_PIN_67_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(67) | 1) +#define MT8365_PIN_67_CMPCLK__FUNC_ANT_SEL0 (MTK_PIN_NO(67) | 2) +#define MT8365_PIN_67_CMPCLK__FUNC_TDM_RX_BCK (MTK_PIN_NO(67) | 4) +#define MT8365_PIN_67_CMPCLK__FUNC_I2S0_BCK (MTK_PIN_NO(67) | 5) +#define MT8365_PIN_67_CMPCLK__FUNC_DBG_MON_B3 (MTK_PIN_NO(67) | 7) + +#define MT8365_PIN_68_CMDAT0__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT8365_PIN_68_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(68) | 1) +#define MT8365_PIN_68_CMDAT0__FUNC_ANT_SEL1 (MTK_PIN_NO(68) | 2) +#define MT8365_PIN_68_CMDAT0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(68) | 4) +#define MT8365_PIN_68_CMDAT0__FUNC_I2S0_LRCK (MTK_PIN_NO(68) | 5) +#define MT8365_PIN_68_CMDAT0__FUNC_DBG_MON_B4 (MTK_PIN_NO(68) | 7) + +#define MT8365_PIN_69_CMDAT1__FUNC_GPIO69 (MTK_PIN_NO(69) | 
0) +#define MT8365_PIN_69_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(69) | 1) +#define MT8365_PIN_69_CMDAT1__FUNC_ANT_SEL2 (MTK_PIN_NO(69) | 2) +#define MT8365_PIN_69_CMDAT1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 3) +#define MT8365_PIN_69_CMDAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(69) | 4) +#define MT8365_PIN_69_CMDAT1__FUNC_I2S0_MCK (MTK_PIN_NO(69) | 5) +#define MT8365_PIN_69_CMDAT1__FUNC_DBG_MON_B5 (MTK_PIN_NO(69) | 7) + +#define MT8365_PIN_70_CMDAT2__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT8365_PIN_70_CMDAT2__FUNC_CMDAT2 (MTK_PIN_NO(70) | 1) +#define MT8365_PIN_70_CMDAT2__FUNC_ANT_SEL3 (MTK_PIN_NO(70) | 2) +#define MT8365_PIN_70_CMDAT2__FUNC_TDM_RX_DI (MTK_PIN_NO(70) | 4) +#define MT8365_PIN_70_CMDAT2__FUNC_I2S0_DI (MTK_PIN_NO(70) | 5) +#define MT8365_PIN_70_CMDAT2__FUNC_DBG_MON_B6 (MTK_PIN_NO(70) | 7) + +#define MT8365_PIN_71_CMDAT3__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT8365_PIN_71_CMDAT3__FUNC_CMDAT3 (MTK_PIN_NO(71) | 1) +#define MT8365_PIN_71_CMDAT3__FUNC_ANT_SEL4 (MTK_PIN_NO(71) | 2) +#define MT8365_PIN_71_CMDAT3__FUNC_DBG_MON_B7 (MTK_PIN_NO(71) | 7) + +#define MT8365_PIN_72_CMDAT4__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define MT8365_PIN_72_CMDAT4__FUNC_CMDAT4 (MTK_PIN_NO(72) | 1) +#define MT8365_PIN_72_CMDAT4__FUNC_ANT_SEL5 (MTK_PIN_NO(72) | 2) +#define MT8365_PIN_72_CMDAT4__FUNC_I2S3_BCK (MTK_PIN_NO(72) | 5) +#define MT8365_PIN_72_CMDAT4__FUNC_DBG_MON_B8 (MTK_PIN_NO(72) | 7) + +#define MT8365_PIN_73_CMDAT5__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define MT8365_PIN_73_CMDAT5__FUNC_CMDAT5 (MTK_PIN_NO(73) | 1) +#define MT8365_PIN_73_CMDAT5__FUNC_ANT_SEL6 (MTK_PIN_NO(73) | 2) +#define MT8365_PIN_73_CMDAT5__FUNC_I2S3_LRCK (MTK_PIN_NO(73) | 5) +#define MT8365_PIN_73_CMDAT5__FUNC_DBG_MON_B9 (MTK_PIN_NO(73) | 7) + +#define MT8365_PIN_74_CMDAT6__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT8365_PIN_74_CMDAT6__FUNC_CMDAT6 (MTK_PIN_NO(74) | 1) +#define MT8365_PIN_74_CMDAT6__FUNC_ANT_SEL7 (MTK_PIN_NO(74) | 2) +#define MT8365_PIN_74_CMDAT6__FUNC_I2S3_MCK (MTK_PIN_NO(74) | 5) +#define MT8365_PIN_74_CMDAT6__FUNC_DBG_MON_B10 (MTK_PIN_NO(74) | 7) + +#define MT8365_PIN_75_CMDAT7__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT8365_PIN_75_CMDAT7__FUNC_CMDAT7 (MTK_PIN_NO(75) | 1) +#define MT8365_PIN_75_CMDAT7__FUNC_I2S3_DO (MTK_PIN_NO(75) | 5) +#define MT8365_PIN_75_CMDAT7__FUNC_DBG_MON_B11 (MTK_PIN_NO(75) | 7) + +#define MT8365_PIN_76_CMDAT8__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT8365_PIN_76_CMDAT8__FUNC_CMDAT8 (MTK_PIN_NO(76) | 1) +#define MT8365_PIN_76_CMDAT8__FUNC_PCM_CLK (MTK_PIN_NO(76) | 5) +#define MT8365_PIN_76_CMDAT8__FUNC_DBG_MON_A29 (MTK_PIN_NO(76) | 7) + +#define MT8365_PIN_77_CMDAT9__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT8365_PIN_77_CMDAT9__FUNC_CMDAT9 (MTK_PIN_NO(77) | 1) +#define MT8365_PIN_77_CMDAT9__FUNC_PCM_SYNC (MTK_PIN_NO(77) | 5) +#define MT8365_PIN_77_CMDAT9__FUNC_DBG_MON_A30 (MTK_PIN_NO(77) | 7) + +#define MT8365_PIN_78_CMHSYNC__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT8365_PIN_78_CMHSYNC__FUNC_CMHSYNC (MTK_PIN_NO(78) | 1) +#define MT8365_PIN_78_CMHSYNC__FUNC_PCM_RX (MTK_PIN_NO(78) | 5) +#define MT8365_PIN_78_CMHSYNC__FUNC_DBG_MON_A31 (MTK_PIN_NO(78) | 7) + +#define MT8365_PIN_79_CMVSYNC__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT8365_PIN_79_CMVSYNC__FUNC_CMVSYNC (MTK_PIN_NO(79) | 1) +#define MT8365_PIN_79_CMVSYNC__FUNC_PCM_TX (MTK_PIN_NO(79) | 5) +#define MT8365_PIN_79_CMVSYNC__FUNC_DBG_MON_A32 (MTK_PIN_NO(79) | 7) + +#define MT8365_PIN_80_MSDC2_CMD__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(80) | 1) +#define 
MT8365_PIN_80_MSDC2_CMD__FUNC_TDM_TX_LRCK (MTK_PIN_NO(80) | 2) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_UTXD1 (MTK_PIN_NO(80) | 3) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_DPI_D19 (MTK_PIN_NO(80) | 4) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_UDI_TMS_XI (MTK_PIN_NO(80) | 5) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(80) | 6) + +#define MT8365_PIN_81_MSDC2_CLK__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(81) | 1) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_TDM_TX_BCK (MTK_PIN_NO(81) | 2) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_URXD1 (MTK_PIN_NO(81) | 3) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_DPI_D20 (MTK_PIN_NO(81) | 4) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_UDI_TCK_XI (MTK_PIN_NO(81) | 5) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(81) | 6) + +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(82) | 2) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UTXD2 (MTK_PIN_NO(82) | 3) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_DPI_D21 (MTK_PIN_NO(82) | 4) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UDI_TDI_XI (MTK_PIN_NO(82) | 5) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(82) | 6) + +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(83) | 1) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(83) | 2) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_URXD2 (MTK_PIN_NO(83) | 3) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_DPI_D22 (MTK_PIN_NO(83) | 4) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_UDI_TDO (MTK_PIN_NO(83) | 5) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(83) | 6) + +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(84) | 1) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(84) | 2) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_PWM_A (MTK_PIN_NO(84) | 3) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_DPI_D23 (MTK_PIN_NO(84) | 4) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_UDI_NTRST_XI (MTK_PIN_NO(84) | 5) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(84) | 6) + +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(85) | 1) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(85) | 2) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_PWM_B (MTK_PIN_NO(85) | 3) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(85) | 5) + +#define MT8365_PIN_86_MSDC2_DSL__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define MT8365_PIN_86_MSDC2_DSL__FUNC_MSDC2_DSL (MTK_PIN_NO(86) | 1) +#define MT8365_PIN_86_MSDC2_DSL__FUNC_TDM_TX_MCK (MTK_PIN_NO(86) | 2) +#define MT8365_PIN_86_MSDC2_DSL__FUNC_PWM_C (MTK_PIN_NO(86) | 3) + +#define MT8365_PIN_87_MSDC1_CMD__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(87) | 1) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(87) | 2) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_DFD_TMS_XI (MTK_PIN_NO(87) | 3) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_APU_JTAG_TMS (MTK_PIN_NO(87) | 4) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_MCU_SPM_TMS (MTK_PIN_NO(87) | 5) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_DSP_JMS (MTK_PIN_NO(87) | 6) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(87) | 7) + +#define 
MT8365_PIN_88_MSDC1_CLK__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(88) | 1) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 2) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 3) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(88) | 4) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(88) | 5) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_DSP_JCK (MTK_PIN_NO(88) | 6) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(88) | 7) + +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(89) | 1) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_PWM_C (MTK_PIN_NO(89) | 2) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_DFD_TDI_XI (MTK_PIN_NO(89) | 3) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_APU_JTAG_TDI (MTK_PIN_NO(89) | 4) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MCU_SPM_TDI (MTK_PIN_NO(89) | 5) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_CONN_DSP_JDI (MTK_PIN_NO(89) | 6) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(89) | 7) + +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(90) | 1) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_SPDIF_IN (MTK_PIN_NO(90) | 2) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_DFD_TDO (MTK_PIN_NO(90) | 3) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_APU_JTAG_TDO (MTK_PIN_NO(90) | 4) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MCU_SPM_TDO (MTK_PIN_NO(90) | 5) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_CONN_DSP_JDO (MTK_PIN_NO(90) | 6) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(90) | 7) + +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(91) | 1) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_SPDIF_OUT (MTK_PIN_NO(91) | 2) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_DFD_NTRST_XI (MTK_PIN_NO(91) | 3) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_APU_JTAG_TRST (MTK_PIN_NO(91) | 4) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(91) | 5) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(91) | 6) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(91) | 7) + +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(92) | 1) +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_IRRX (MTK_PIN_NO(92) | 2) +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_PWM_A (MTK_PIN_NO(92) | 3) + +#define MT8365_PIN_93_MSDC0_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT8365_PIN_93_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(93) | 1) +#define MT8365_PIN_93_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(93) | 2) + +#define MT8365_PIN_94_MSDC0_DAT6__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT8365_PIN_94_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(94) | 1) +#define MT8365_PIN_94_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(94) | 2) + +#define MT8365_PIN_95_MSDC0_DAT5__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT8365_PIN_95_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(95) | 1) +#define MT8365_PIN_95_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(95) | 2) + +#define MT8365_PIN_96_MSDC0_DAT4__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT8365_PIN_96_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(96) | 1) +#define MT8365_PIN_96_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(96) | 2) + +#define MT8365_PIN_97_MSDC0_RSTB__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT8365_PIN_97_MSDC0_RSTB__FUNC_MSDC0_RSTB 
(MTK_PIN_NO(97) | 1) +#define MT8365_PIN_97_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(97) | 2) + +#define MT8365_PIN_98_MSDC0_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT8365_PIN_98_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(98) | 1) +#define MT8365_PIN_98_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(98) | 2) + +#define MT8365_PIN_99_MSDC0_CLK__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT8365_PIN_99_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(99) | 1) +#define MT8365_PIN_99_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(99) | 2) + +#define MT8365_PIN_100_MSDC0_DAT3__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT8365_PIN_100_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(100) | 1) +#define MT8365_PIN_100_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(100) | 2) + +#define MT8365_PIN_101_MSDC0_DAT2__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define MT8365_PIN_101_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(101) | 1) +#define MT8365_PIN_101_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(101) | 2) + +#define MT8365_PIN_102_MSDC0_DAT1__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT8365_PIN_102_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(102) | 1) +#define MT8365_PIN_102_MSDC0_DAT1__FUNC_NDQS (MTK_PIN_NO(102) | 2) + +#define MT8365_PIN_103_MSDC0_DAT0__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT8365_PIN_103_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(103) | 1) +#define MT8365_PIN_103_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(103) | 2) + +#define MT8365_PIN_104_MSDC0_DSL__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT8365_PIN_104_MSDC0_DSL__FUNC_MSDC0_DSL (MTK_PIN_NO(104) | 1) + +#define MT8365_PIN_105_NCLE__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT8365_PIN_105_NCLE__FUNC_NCLE (MTK_PIN_NO(105) | 1) +#define MT8365_PIN_105_NCLE__FUNC_TDM_RX_MCK (MTK_PIN_NO(105) | 2) +#define MT8365_PIN_105_NCLE__FUNC_DBG_MON_B12 (MTK_PIN_NO(105) | 7) + +#define MT8365_PIN_106_NCEB1__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT8365_PIN_106_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(106) | 1) +#define MT8365_PIN_106_NCEB1__FUNC_TDM_RX_BCK (MTK_PIN_NO(106) | 2) +#define MT8365_PIN_106_NCEB1__FUNC_DBG_MON_B13 (MTK_PIN_NO(106) | 7) + +#define MT8365_PIN_107_NCEB0__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT8365_PIN_107_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(107) | 1) +#define MT8365_PIN_107_NCEB0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(107) | 2) +#define MT8365_PIN_107_NCEB0__FUNC_DBG_MON_B14 (MTK_PIN_NO(107) | 7) + +#define MT8365_PIN_108_NREB__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT8365_PIN_108_NREB__FUNC_NREB (MTK_PIN_NO(108) | 1) +#define MT8365_PIN_108_NREB__FUNC_TDM_RX_DI (MTK_PIN_NO(108) | 2) +#define MT8365_PIN_108_NREB__FUNC_DBG_MON_B15 (MTK_PIN_NO(108) | 7) + +#define MT8365_PIN_109_NRNB__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT8365_PIN_109_NRNB__FUNC_NRNB (MTK_PIN_NO(109) | 1) +#define MT8365_PIN_109_NRNB__FUNC_TSF_IN (MTK_PIN_NO(109) | 2) +#define MT8365_PIN_109_NRNB__FUNC_DBG_MON_B16 (MTK_PIN_NO(109) | 7) + +#define MT8365_PIN_110_PCM_CLK__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT8365_PIN_110_PCM_CLK__FUNC_PCM_CLK (MTK_PIN_NO(110) | 1) +#define MT8365_PIN_110_PCM_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(110) | 2) +#define MT8365_PIN_110_PCM_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 3) +#define MT8365_PIN_110_PCM_CLK__FUNC_SPDIF_IN (MTK_PIN_NO(110) | 4) +#define MT8365_PIN_110_PCM_CLK__FUNC_DPI_D15 (MTK_PIN_NO(110) | 5) + +#define MT8365_PIN_111_PCM_SYNC__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT8365_PIN_111_PCM_SYNC__FUNC_PCM_SYNC (MTK_PIN_NO(111) | 1) +#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(111) | 2) +#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 3) +#define 
MT8365_PIN_111_PCM_SYNC__FUNC_SPDIF_OUT (MTK_PIN_NO(111) | 4) +#define MT8365_PIN_111_PCM_SYNC__FUNC_DPI_D16 (MTK_PIN_NO(111) | 5) + +#define MT8365_PIN_112_PCM_RX__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT8365_PIN_112_PCM_RX__FUNC_PCM_RX (MTK_PIN_NO(112) | 1) +#define MT8365_PIN_112_PCM_RX__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2) +#define MT8365_PIN_112_PCM_RX__FUNC_I2S3_MCK (MTK_PIN_NO(112) | 3) +#define MT8365_PIN_112_PCM_RX__FUNC_IRRX (MTK_PIN_NO(112) | 4) +#define MT8365_PIN_112_PCM_RX__FUNC_DPI_D17 (MTK_PIN_NO(112) | 5) + +#define MT8365_PIN_113_PCM_TX__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT8365_PIN_113_PCM_TX__FUNC_PCM_TX (MTK_PIN_NO(113) | 1) +#define MT8365_PIN_113_PCM_TX__FUNC_I2S0_MCK (MTK_PIN_NO(113) | 2) +#define MT8365_PIN_113_PCM_TX__FUNC_I2S3_DO (MTK_PIN_NO(113) | 3) +#define MT8365_PIN_113_PCM_TX__FUNC_PWM_B (MTK_PIN_NO(113) | 4) +#define MT8365_PIN_113_PCM_TX__FUNC_DPI_D18 (MTK_PIN_NO(113) | 5) + +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(114) | 1) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S1_DO (MTK_PIN_NO(114) | 2) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S2_DI (MTK_PIN_NO(114) | 3) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(114) | 4) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(114) | 5) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_SPDIF_IN (MTK_PIN_NO(114) | 6) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_DBG_MON_B17 (MTK_PIN_NO(114) | 7) + +#define MT8365_PIN_115_I2S_LRCK__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(115) | 1) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S1_LRCK (MTK_PIN_NO(115) | 2) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S2_LRCK (MTK_PIN_NO(115) | 3) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(115) | 4) +#define MT8365_PIN_115_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(115) | 5) +#define MT8365_PIN_115_I2S_LRCK__FUNC_SPDIF_OUT (MTK_PIN_NO(115) | 6) +#define MT8365_PIN_115_I2S_LRCK__FUNC_DBG_MON_B18 (MTK_PIN_NO(115) | 7) + +#define MT8365_PIN_116_I2S_BCK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(116) | 1) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(116) | 2) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S2_BCK (MTK_PIN_NO(116) | 3) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(116) | 4) +#define MT8365_PIN_116_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(116) | 5) +#define MT8365_PIN_116_I2S_BCK__FUNC_IRRX (MTK_PIN_NO(116) | 6) +#define MT8365_PIN_116_I2S_BCK__FUNC_DBG_MON_B19 (MTK_PIN_NO(116) | 7) + +#define MT8365_PIN_117_DMIC0_CLK__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT8365_PIN_117_DMIC0_CLK__FUNC_DMIC0_CLK (MTK_PIN_NO(117) | 1) +#define MT8365_PIN_117_DMIC0_CLK__FUNC_I2S2_BCK (MTK_PIN_NO(117) | 2) +#define MT8365_PIN_117_DMIC0_CLK__FUNC_DBG_MON_B20 (MTK_PIN_NO(117) | 7) + +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DMIC0_DAT0 (MTK_PIN_NO(118) | 1) +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_I2S2_DI (MTK_PIN_NO(118) | 2) +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DBG_MON_B21 (MTK_PIN_NO(118) | 7) + +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DMIC0_DAT1 (MTK_PIN_NO(119) | 1) +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_I2S2_LRCK (MTK_PIN_NO(119) | 2) +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DBG_MON_B22 (MTK_PIN_NO(119) | 7) + +#define 
MT8365_PIN_120_DMIC1_CLK__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT8365_PIN_120_DMIC1_CLK__FUNC_DMIC1_CLK (MTK_PIN_NO(120) | 1) +#define MT8365_PIN_120_DMIC1_CLK__FUNC_I2S2_MCK (MTK_PIN_NO(120) | 2) +#define MT8365_PIN_120_DMIC1_CLK__FUNC_DBG_MON_B23 (MTK_PIN_NO(120) | 7) + +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DMIC1_DAT0 (MTK_PIN_NO(121) | 1) +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_I2S1_BCK (MTK_PIN_NO(121) | 2) +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DBG_MON_B24 (MTK_PIN_NO(121) | 7) + +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DMIC1_DAT1 (MTK_PIN_NO(122) | 1) +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_I2S1_LRCK (MTK_PIN_NO(122) | 2) +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DBG_MON_B25 (MTK_PIN_NO(122) | 7) + +#define MT8365_PIN_123_DMIC2_CLK__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT8365_PIN_123_DMIC2_CLK__FUNC_DMIC2_CLK (MTK_PIN_NO(123) | 1) +#define MT8365_PIN_123_DMIC2_CLK__FUNC_I2S1_MCK (MTK_PIN_NO(123) | 2) +#define MT8365_PIN_123_DMIC2_CLK__FUNC_DBG_MON_B26 (MTK_PIN_NO(123) | 7) + +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DMIC2_DAT0 (MTK_PIN_NO(124) | 1) +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_I2S1_DO (MTK_PIN_NO(124) | 2) +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DBG_MON_B27 (MTK_PIN_NO(124) | 7) + +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DMIC2_DAT1 (MTK_PIN_NO(125) | 1) +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(125) | 2) +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DBG_MON_B28 (MTK_PIN_NO(125) | 7) + +#define MT8365_PIN_126_DMIC3_CLK__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT8365_PIN_126_DMIC3_CLK__FUNC_DMIC3_CLK (MTK_PIN_NO(126) | 1) +#define MT8365_PIN_126_DMIC3_CLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(126) | 2) + +#define MT8365_PIN_127_DMIC3_DAT0__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define MT8365_PIN_127_DMIC3_DAT0__FUNC_DMIC3_DAT0 (MTK_PIN_NO(127) | 1) +#define MT8365_PIN_127_DMIC3_DAT0__FUNC_TDM_RX_DI (MTK_PIN_NO(127) | 2) + +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_DMIC3_DAT1 (MTK_PIN_NO(128) | 1) +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(128) | 2) +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_VAD_CLK (MTK_PIN_NO(128) | 3) + +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_TDM_TX_BCK (MTK_PIN_NO(129) | 1) +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(129) | 2) +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_ckmon1_ck (MTK_PIN_NO(129) | 3) + +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_TDM_TX_LRCK (MTK_PIN_NO(130) | 1) +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(130) | 2) +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_ckmon2_ck (MTK_PIN_NO(130) | 3) + +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_TDM_TX_MCK (MTK_PIN_NO(131) | 1) +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 2) +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_ckmon3_ck (MTK_PIN_NO(131) | 3) + +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(132) | 1) +#define 
MT8365_PIN_132_TDM_TX_DATA0__FUNC_I2S3_DO (MTK_PIN_NO(132) | 2) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_ckmon4_ck (MTK_PIN_NO(132) | 3) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_DBG_MON_B29 (MTK_PIN_NO(132) | 7) + +#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(133) | 1) +#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_DBG_MON_B30 (MTK_PIN_NO(133) | 7) + +#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(134) | 1) +#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_DBG_MON_B31 (MTK_PIN_NO(134) | 7) + +#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(135) | 1) +#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_DBG_MON_B32 (MTK_PIN_NO(135) | 7) + +#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_CONN_TOP_CLK (MTK_PIN_NO(136) | 1) + +#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_CONN_TOP_DATA (MTK_PIN_NO(137) | 1) + +#define MT8365_PIN_138_CONN_HRST_B__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define MT8365_PIN_138_CONN_HRST_B__FUNC_CONN_HRST_B (MTK_PIN_NO(138) | 1) + +#define MT8365_PIN_139_CONN_WB_PTA__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define MT8365_PIN_139_CONN_WB_PTA__FUNC_CONN_WB_PTA (MTK_PIN_NO(139) | 1) + +#define MT8365_PIN_140_CONN_BT_CLK__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define MT8365_PIN_140_CONN_BT_CLK__FUNC_CONN_BT_CLK (MTK_PIN_NO(140) | 1) + +#define MT8365_PIN_141_CONN_BT_DATA__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define MT8365_PIN_141_CONN_BT_DATA__FUNC_CONN_BT_DATA (MTK_PIN_NO(141) | 1) + +#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(142) | 1) + +#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(143) | 1) + +#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(144) | 1) + +#endif /* __MT8365_PINFUNC_H */ diff --git a/include/dt-bindings/power/imx8mm-power.h b/include/dt-bindings/power/imx8mm-power.h new file mode 100644 index 0000000000..fc9c2e16aa --- /dev/null +++ b/include/dt-bindings/power/imx8mm-power.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2020 Pengutronix, Lucas Stach + */ + +#ifndef __DT_BINDINGS_IMX8MM_POWER_H__ +#define __DT_BINDINGS_IMX8MM_POWER_H__ + +#define IMX8MM_POWER_DOMAIN_HSIOMIX 0 +#define IMX8MM_POWER_DOMAIN_PCIE 1 +#define IMX8MM_POWER_DOMAIN_OTG1 2 +#define IMX8MM_POWER_DOMAIN_OTG2 3 +#define IMX8MM_POWER_DOMAIN_GPUMIX 4 +#define IMX8MM_POWER_DOMAIN_GPU 5 +#define IMX8MM_POWER_DOMAIN_VPUMIX 6 +#define IMX8MM_POWER_DOMAIN_VPUG1 7 +#define IMX8MM_POWER_DOMAIN_VPUG2 8 +#define IMX8MM_POWER_DOMAIN_VPUH1 9 +#define IMX8MM_POWER_DOMAIN_DISPMIX 10 +#define IMX8MM_POWER_DOMAIN_MIPI 11 + +#endif diff --git a/include/dt-bindings/power/imx8mn-power.h b/include/dt-bindings/power/imx8mn-power.h new file mode 100644 index 0000000000..102ee85a9b --- /dev/null +++ b/include/dt-bindings/power/imx8mn-power.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2020 Compass Electronics Group, LLC + */ + +#ifndef 
__DT_BINDINGS_IMX8MN_POWER_H__ +#define __DT_BINDINGS_IMX8MN_POWER_H__ + +#define IMX8MN_POWER_DOMAIN_HSIOMIX 0 +#define IMX8MN_POWER_DOMAIN_OTG1 1 +#define IMX8MN_POWER_DOMAIN_GPUMIX 2 +#define IMX8MN_POWER_DOMAIN_DISPMIX 3 +#define IMX8MN_POWER_DOMAIN_MIPI 4 + +#endif diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index eedb5d94c0..8b5708bb96 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -81,6 +81,19 @@ #define SC7280_LCX 7 #define SC7280_MSS 8 +/* SC8180X Power Domain Indexes */ +#define SC8180X_CX 0 +#define SC8180X_CX_AO 1 +#define SC8180X_EBI 2 +#define SC8180X_GFX 3 +#define SC8180X_LCX 4 +#define SC8180X_LMX 5 +#define SC8180X_MMCX 6 +#define SC8180X_MMCX_AO 7 +#define SC8180X_MSS 8 +#define SC8180X_MX 9 +#define SC8180X_MX_AO 10 + /* SDM845 Power Domain performance levels */ #define RPMH_REGULATOR_LEVEL_RETENTION 16 #define RPMH_REGULATOR_LEVEL_MIN_SVS 48 @@ -95,6 +108,14 @@ #define RPMH_REGULATOR_LEVEL_TURBO 384 #define RPMH_REGULATOR_LEVEL_TURBO_L1 416 +/* MDM9607 Power Domains */ +#define MDM9607_VDDCX 0 +#define MDM9607_VDDCX_AO 1 +#define MDM9607_VDDCX_VFL 2 +#define MDM9607_VDDMX 3 +#define MDM9607_VDDMX_AO 4 +#define MDM9607_VDDMX_VFL 5 + /* MSM8939 Power Domains */ #define MSM8939_VDDMDCX 0 #define MSM8939_VDDMDCX_AO 1 diff --git a/include/dt-bindings/power/rk3568-power.h b/include/dt-bindings/power/rk3568-power.h new file mode 100644 index 0000000000..6cc1af1a9d --- /dev/null +++ b/include/dt-bindings/power/rk3568-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3568_POWER_H__ +#define __DT_BINDINGS_POWER_RK3568_POWER_H__ + +/* VD_CORE */ +#define RK3568_PD_CPU_0 0 +#define RK3568_PD_CPU_1 1 +#define RK3568_PD_CPU_2 2 +#define RK3568_PD_CPU_3 3 +#define RK3568_PD_CORE_ALIVE 4 + +/* VD_PMU */ +#define RK3568_PD_PMU 5 + +/* VD_NPU */ +#define RK3568_PD_NPU 6 + +/* VD_GPU */ +#define RK3568_PD_GPU 7 + +/* VD_LOGIC */ +#define RK3568_PD_VI 8 +#define RK3568_PD_VO 9 +#define RK3568_PD_RGA 10 +#define RK3568_PD_VPU 11 +#define RK3568_PD_CENTER 12 +#define RK3568_PD_RKVDEC 13 +#define RK3568_PD_RKVENC 14 +#define RK3568_PD_PIPE 15 +#define RK3568_PD_LOGIC_ALIVE 16 + +#endif diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h index f0c3aaef67..f3a0ed3178 100644 --- a/include/dt-bindings/reset/stm32mp1-resets.h +++ b/include/dt-bindings/reset/stm32mp1-resets.h @@ -7,6 +7,7 @@ #ifndef _DT_BINDINGS_STM32MP1_RESET_H_ #define _DT_BINDINGS_STM32MP1_RESET_H_ +#define MCU_HOLD_BOOT_R 2144 #define LTDC_R 3072 #define DSI_R 3076 #define DDRPERFM_R 3080 @@ -105,4 +106,18 @@ #define GPIOJ_R 19785 #define GPIOK_R 19786 +/* SCMI reset domain identifiers */ +#define RST_SCMI0_SPI6 0 +#define RST_SCMI0_I2C4 1 +#define RST_SCMI0_I2C6 2 +#define RST_SCMI0_USART1 3 +#define RST_SCMI0_STGEN 4 +#define RST_SCMI0_GPIOZ 5 +#define RST_SCMI0_CRYP1 6 +#define RST_SCMI0_HASH1 7 +#define RST_SCMI0_RNG1 8 +#define RST_SCMI0_MDMA 9 +#define RST_SCMI0_MCU 10 +#define RST_SCMI0_MCU_HOLD_BOOT 11 + #endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */ diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h index f64b5d2e6e..66c21ab03e 100644 --- a/include/dt-bindings/sound/qcom,q6afe.h +++ b/include/dt-bindings/sound/qcom,q6afe.h @@ -129,6 +129,8 @@ #define TX_CODEC_DMA_TX_5 124 #define RX_CODEC_DMA_RX_6 125 #define RX_CODEC_DMA_RX_7 126 +#define QUINARY_MI2S_RX 127 +#define QUINARY_MI2S_TX 
128 #define LPASS_CLK_ID_PRI_MI2S_IBIT 1 #define LPASS_CLK_ID_PRI_MI2S_EBIT 2 diff --git a/include/kunit/test.h b/include/kunit/test.h index 49601c4b98..24b40e5c16 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -97,6 +97,9 @@ struct kunit; /* Maximum size of parameter description string. */ #define KUNIT_PARAM_DESC_SIZE 128 +/* Maximum size of a status comment. */ +#define KUNIT_STATUS_COMMENT_SIZE 256 + /* * TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a * sub-subtest. See the "Subtests" section in @@ -105,6 +108,18 @@ struct kunit; #define KUNIT_SUBTEST_INDENT " " #define KUNIT_SUBSUBTEST_INDENT " " +/** + * enum kunit_status - Type of result for a test or test suite + * @KUNIT_SUCCESS: Denotes the test suite has neither failed nor been skipped + * @KUNIT_FAILURE: Denotes the test has failed. + * @KUNIT_SKIPPED: Denotes the test has been skipped. + */ +enum kunit_status { + KUNIT_SUCCESS, + KUNIT_FAILURE, + KUNIT_SKIPPED, +}; + /** * struct kunit_case - represents an individual test case. * @@ -148,13 +163,20 @@ struct kunit_case { const void* (*generate_params)(const void *prev, char *desc); /* private: internal use only. */ - bool success; + enum kunit_status status; char *log; }; -static inline char *kunit_status_to_string(bool status) +static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) { - return status ? "ok" : "not ok"; + switch (status) { + case KUNIT_SKIPPED: + case KUNIT_SUCCESS: + return "ok"; + case KUNIT_FAILURE: + return "not ok"; + } + return "invalid"; } /** @@ -212,6 +234,7 @@ struct kunit_suite { struct kunit_case *test_cases; /* private: internal use only */ + char status_comment[KUNIT_STATUS_COMMENT_SIZE]; struct dentry *debugfs; char *log; }; @@ -245,19 +268,21 @@ struct kunit { * be read after the test case finishes once all threads associated * with the test case have terminated. */ - bool success; /* Read only after test_case finishes! */ spinlock_t lock; /* Guards all mutable test state. */ + enum kunit_status status; /* Read only after test_case finishes! */ /* * Because resources is a list that may be updated multiple times (with * new resources) from any thread associated with a test case, we must * protect it with some type of lock. */ struct list_head resources; /* Protected by lock.
*/ + + char status_comment[KUNIT_STATUS_COMMENT_SIZE]; }; static inline void kunit_set_failure(struct kunit *test) { - WRITE_ONCE(test->success, false); + WRITE_ONCE(test->status, KUNIT_FAILURE); } void kunit_init_test(struct kunit *test, const char *name, char *log); @@ -348,7 +373,7 @@ static inline int kunit_run_all_tests(void) #define kunit_suite_for_each_test_case(suite, test_case) \ for (test_case = suite->test_cases; test_case->run_case; test_case++) -bool kunit_suite_has_succeeded(struct kunit_suite *suite); +enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite); /* * Like kunit_alloc_resource() below, but returns the struct kunit_resource @@ -515,8 +540,9 @@ kunit_find_resource(struct kunit *test, void *match_data) { struct kunit_resource *res, *found = NULL; + unsigned long flags; - spin_lock(&test->lock); + spin_lock_irqsave(&test->lock, flags); list_for_each_entry_reverse(res, &test->resources, node) { if (match(test, res, (void *)match_data)) { @@ -526,7 +552,7 @@ kunit_find_resource(struct kunit *test, } } - spin_unlock(&test->lock); + spin_unlock_irqrestore(&test->lock, flags); return found; } @@ -576,17 +602,31 @@ static inline int kunit_destroy_named_resource(struct kunit *test, */ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res); +/** + * kunit_kmalloc_array() - Like kmalloc_array() except the allocation is *test managed*. + * @test: The test context object. + * @n: number of elements. + * @size: The size in bytes of the desired memory. + * @gfp: flags passed to underlying kmalloc(). + * + * Just like `kmalloc_array(...)`, except the allocation is managed by the test case + * and is automatically cleaned up after the test case concludes. See &struct + * kunit_resource for more information. + */ +void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags); + /** * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*. * @test: The test context object. * @size: The size in bytes of the desired memory. * @gfp: flags passed to underlying kmalloc(). * - * Just like `kmalloc(...)`, except the allocation is managed by the test case - * and is automatically cleaned up after the test case concludes. See &struct - * kunit_resource for more information. + * See kmalloc() and kunit_kmalloc_array() for more information. */ -void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp); +static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp) +{ + return kunit_kmalloc_array(test, 1, size, gfp); +} /** * kunit_kfree() - Like kfree except for allocations managed by KUnit. @@ -601,16 +641,66 @@ void kunit_kfree(struct kunit *test, const void *ptr); * @size: The size in bytes of the desired memory. * @gfp: flags passed to underlying kmalloc(). * - * See kzalloc() and kunit_kmalloc() for more information. + * See kzalloc() and kunit_kmalloc_array() for more information. */ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp) { return kunit_kmalloc(test, size, gfp | __GFP_ZERO); } +/** + * kunit_kcalloc() - Just like kunit_kmalloc_array(), but zeroes the allocation. + * @test: The test context object. + * @n: number of elements. + * @size: The size in bytes of the desired memory. + * @gfp: flags passed to underlying kmalloc(). + * + * See kcalloc() and kunit_kmalloc_array() for more information. 
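A quick usage sketch for the test-managed allocators declared in this hunk, using the kcalloc() variant defined just below (the test name is invented for illustration). Nothing is freed by hand, since the allocation sits on the test's resource list:

    #include <kunit/test.h>

    static void example_alloc_test(struct kunit *test)
    {
    	/* Test-managed: freed automatically when the test case ends. */
    	int *vals = kunit_kcalloc(test, 4, sizeof(*vals), GFP_KERNEL);

    	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vals);	/* abort the test on OOM */
    	KUNIT_EXPECT_EQ(test, vals[3], 0);		/* kcalloc() zeroes memory */
    }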
+ */ +static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags) +{ + return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO); +} + void kunit_cleanup(struct kunit *test); -void kunit_log_append(char *log, const char *fmt, ...); +void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...); + +/** + * kunit_mark_skipped() - Marks @test_or_suite as skipped + * + * @test_or_suite: The test context object. + * @fmt: A printk() style format string. + * + * Marks the test as skipped. @fmt is given output as the test status + * comment, typically the reason the test was skipped. + * + * Test execution continues after kunit_mark_skipped() is called. + */ +#define kunit_mark_skipped(test_or_suite, fmt, ...) \ + do { \ + WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \ + scnprintf((test_or_suite)->status_comment, \ + KUNIT_STATUS_COMMENT_SIZE, \ + fmt, ##__VA_ARGS__); \ + } while (0) + +/** + * kunit_skip() - Marks @test_or_suite as skipped + * + * @test_or_suite: The test context object. + * @fmt: A printk() style format string. + * + * Skips the test. @fmt is given output as the test status + * comment, typically the reason the test was skipped. + * + * Test execution is halted after kunit_skip() is called. + */ +#define kunit_skip(test_or_suite, fmt, ...) \ + do { \ + kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\ + kunit_try_catch_throw(&((test_or_suite)->try_catch)); \ + } while (0) /* * printk and log to per-test or per-suite log buffer. Logging only done @@ -775,7 +865,6 @@ void kunit_do_assertion, do { \ typeof(left) __left = (left); \ typeof(right) __right = (right); \ - ((void)__typecheck(__left, __right)); \ \ KUNIT_ASSERTION(test, \ __left op __right, \ @@ -1129,8 +1218,8 @@ do { \ fmt, \ ...) \ do { \ - typeof(left) __left = (left); \ - typeof(right) __right = (right); \ + const char *__left = (left); \ + const char *__right = (right); \ \ KUNIT_ASSERTION(test, \ strcmp(__left, __right) op 0, \ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index ec621180ef..e602d848fc 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -72,6 +72,9 @@ struct vgic_global { bool has_gicv4; bool has_gicv4_1; + /* Pseudo GICv3 from outer space */ + bool no_hw_deactivation; + /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; @@ -89,6 +92,26 @@ enum vgic_irq_config { VGIC_CONFIG_LEVEL }; +/* + * Per-irq ops overriding some common behaviours. + * + * Always called in non-preemptible section and the functions can use + * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs. + */ +struct irq_ops { + /* Per interrupt flags for special-cased interrupts */ + unsigned long flags; + +#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */ + + /* + * Callback function pointer to in-kernel devices that can tell us the + * state of the input level of mapped level-triggered IRQ faster than + * peeking into the physical GIC. + */ + bool (*get_input_level)(int vintid); +}; + struct vgic_irq { raw_spinlock_t irq_lock; /* Protects the content of the struct */ struct list_head lpi_list; /* Used to link all LPIs together */ @@ -126,21 +149,17 @@ struct vgic_irq { u8 group; /* 0 == group 0, 1 == group 1 */ enum vgic_irq_config config; /* Level or edge */ - /* - * Callback function pointer to in-kernel devices that can tell us the - * state of the input level of mapped level-triggered IRQ faster than - * peaking into the physical GIC.
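The skip machinery added above can be exercised like this; a minimal sketch, assuming an arbitrary config prerequisite:

    #include <kunit/test.h>

    static void example_skip_test(struct kunit *test)
    {
    	if (!IS_ENABLED(CONFIG_SMP))	/* any prerequisite check */
    		kunit_skip(test, "requires CONFIG_SMP=y");

    	/* Not reached when skipped: kunit_skip() throws out of the test. */
    	KUNIT_EXPECT_TRUE(test, true);
    }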
- * - * Always called in non-preemptible section and the functions can use - * kvm_arm_get_running_vcpu() to get the vcpu pointer for private - * IRQs. - */ - bool (*get_input_level)(int vintid); + struct irq_ops *ops; void *owner; /* Opaque pointer to reserve an interrupt for in-kernel devices. */ }; +static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq) +{ + return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE); +} + struct vgic_register_region; struct vgic_its; @@ -352,7 +371,7 @@ void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level, void *owner); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, - u32 vintid, bool (*get_input_level)(int vindid)); + u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c60745f657..72e4f7fd26 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -132,6 +132,7 @@ enum acpi_address_range_id { union acpi_subtable_headers { struct acpi_subtable_header common; struct acpi_hmat_structure hmat; + struct acpi_prmt_module_header prmt; }; typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); @@ -259,9 +260,12 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); #ifdef CONFIG_ARM64 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size); #else static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +static inline void +acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { } #endif int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); @@ -551,6 +555,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 +#define OSC_SB_PRM_SUPPORT 0x00200000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; @@ -666,7 +671,6 @@ extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); -void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *, struct property_entry *); @@ -710,6 +714,8 @@ static inline u64 acpi_arch_get_root_pointer(void) } #endif +int acpi_get_local_address(acpi_handle handle, u32 *addr); + #else /* !CONFIG_ACPI */ #define acpi_disabled 1 @@ -765,7 +771,7 @@ static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) return false; } -static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) { return NULL; } @@ -775,12 +781,12 @@ static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) return false; } -static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode) { return NULL; } -static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, +static inline bool 
acpi_data_node_match(const struct fwnode_handle *fwnode, const char *name) { return false; @@ -911,7 +917,7 @@ acpi_create_platform_device(struct acpi_device *adev, return NULL; } -static inline bool acpi_dma_supported(struct acpi_device *adev) +static inline bool acpi_dma_supported(const struct acpi_device *adev) { return false; } @@ -965,6 +971,11 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res) return NULL; } +static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) +{ + return -ENODEV; +} + #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC @@ -1004,6 +1015,7 @@ int acpi_dev_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); +bool acpi_storage_d3(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } @@ -1011,6 +1023,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return 0; } +static inline bool acpi_storage_d3(struct device *dev) +{ + return false; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) @@ -1096,6 +1112,8 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); +bool acpi_gpio_get_io_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, @@ -1103,6 +1121,11 @@ static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, { return false; } +static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio) +{ + return false; +} static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index) { diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 1a12baa58e..f1f0842a2c 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -34,9 +34,8 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, void acpi_configure_pmsi_domain(struct device *dev); int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ -void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); -const struct iommu_ops *iort_iommu_configure_id(struct device *dev, - const u32 *id_in); +int iort_dma_get_ranges(struct device *dev, u64 *size); +int iort_iommu_configure_id(struct device *dev, const u32 *id_in); int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); phys_addr_t acpi_iort_dma_get_max_cpu_address(void); #else @@ -48,11 +47,10 @@ static inline struct irq_domain *iort_get_device_domain( { return NULL; } static inline void acpi_configure_pmsi_domain(struct device *dev) { } /* IOMMU interface */ -static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, - u64 *size) { } -static inline const struct iommu_ops *iort_iommu_configure_id( - struct device *dev, const u32 *id_in) -{ return NULL; } +static inline int iort_dma_get_ranges(struct device *dev, u64 *size) +{ return -ENODEV; } +static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) +{ return -ENODEV; } 
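For the acpi_get_local_address() helper introduced above, a hedged call-site sketch (function and variable names invented); because the !CONFIG_ACPI stub returns -ENODEV, the caller needs no #ifdef:

    #include <linux/acpi.h>

    static int example_read_unit_address(struct acpi_device *adev, u32 *addr)
    {
    	/* Derived from the device's _ADR; -ENODEV when ACPI is disabled. */
    	return acpi_get_local_address(adev->handle, addr);
    }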
static inline int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) { return 0; } diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h new file mode 100644 index 0000000000..0a24ab7cb6 --- /dev/null +++ b/include/linux/acpi_mdio.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ACPI helper for the MDIO (Ethernet PHY) API + */ + +#ifndef __LINUX_ACPI_MDIO_H +#define __LINUX_ACPI_MDIO_H + +#include + +#if IS_ENABLED(CONFIG_ACPI_MDIO) +int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode); +#else /* CONFIG_ACPI_MDIO */ +static inline int +acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) +{ + /* + * Fall back to mdiobus_register() function to register a bus. + * This way, we don't have to keep compat bits around in drivers. + */ + + return mdiobus_register(mdio); +} +#endif + +#endif /* __LINUX_ACPI_MDIO_H */ diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h new file mode 100644 index 0000000000..1eb8ee5b0e --- /dev/null +++ b/include/linux/acpi_viot.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ACPI_VIOT_H__ +#define __ACPI_VIOT_H__ + +#include + +#ifdef CONFIG_ACPI_VIOT +void __init acpi_viot_init(void); +int viot_iommu_configure(struct device *dev); +#else +static inline void acpi_viot_init(void) {} +static inline int viot_iommu_configure(struct device *dev) +{ + return -ENODEV; +} +#endif + +#endif /* __ACPI_VIOT_H__ */ diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h index a889f28afb..c637e0997f 100644 --- a/include/linux/adreno-smmu-priv.h +++ b/include/linux/adreno-smmu-priv.h @@ -8,6 +8,32 @@ #include + +/** + * struct adreno_smmu_fault_info - container for key fault information + * + * @far: The faulting IOVA from ARM_SMMU_CB_FAR + * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0 + * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR + * @fsr: The fault status from ARM_SMMU_CB_FSR + * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0 + * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR1 + * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx) + * + * This struct passes back key page fault information to the GPU driver + * through the get_fault_info function pointer. + * The GPU driver can use this information to print informative + * log messages and provide deeper GPU specific insight into the fault. + */ +struct adreno_smmu_fault_info { + u64 far; + u64 ttbr0; + u32 contextidr; + u32 fsr; + u32 fsynr0; + u32 fsynr1; + u32 cbfrsynra; +}; + /** * struct adreno_smmu_priv - private interface between adreno-smmu and GPU * @@ -17,6 +43,13 @@ * @set_ttbr0_cfg: Set the TTBR0 config for the GPUs context bank. A * NULL config disables TTBR0 translation, otherwise * TTBR0 translation is enabled with the specified cfg + * @get_fault_info: Called by the GPU fault handler to get information about + * the fault + * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call + * before set_ttbr0_cfg(). If stalling on fault is enabled, + * the GPU driver must call resume_translation() + * @resume_translation: Resume translation after a fault + * + * * The GPU driver (drm/msm) and adreno-smmu work together for controlling * the GPU's SMMU instance.
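A sketch of how a GPU driver might consume the two new adreno-smmu callbacks (illustrative only; the real drm/msm fault path carries more state than this):

    #include <linux/adreno-smmu-priv.h>

    static void example_gpu_fault_handler(struct adreno_smmu_priv *priv)
    {
    	struct adreno_smmu_fault_info info = {};

    	if (priv->get_fault_info)
    		priv->get_fault_info(priv->cookie, &info);

    	/* ... decode info.far, info.fsr, info.fsynr0 for the log ... */

    	/* With stall-on-fault enabled, translation must be resumed. */
    	if (priv->resume_translation)
    		priv->resume_translation(priv->cookie, false);
    }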
This is by necessity, as the GPU is directly @@ -31,6 +64,9 @@ struct adreno_smmu_priv { const void *cookie; const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie); int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg); + void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info); + void (*set_stall)(const void *cookie, bool enabled); + void (*resume_translation)(const void *cookie, bool terminate); }; -#endif /* __ADRENO_SMMU_PRIV_H */ \ No newline at end of file +#endif /* __ADRENO_SMMU_PRIV_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 11e555cfae..f180240dc9 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -37,6 +37,7 @@ bool topology_scale_freq_invariant(void); enum scale_freq_source { SCALE_FREQ_SOURCE_CPUFREQ = 0, SCALE_FREQ_SOURCE_ARCH, + SCALE_FREQ_SOURCE_CPPC, }; struct scale_freq_data { diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 6861489a18..7d1cabe152 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -63,6 +63,9 @@ #define ARM_SMCCC_VERSION_1_0 0x10000 #define ARM_SMCCC_VERSION_1_1 0x10001 #define ARM_SMCCC_VERSION_1_2 0x10002 +#define ARM_SMCCC_VERSION_1_3 0x10003 + +#define ARM_SMCCC_1_3_SVE_HINT 0x10000 #define ARM_SMCCC_VERSION_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ @@ -216,6 +219,8 @@ u32 arm_smccc_get_version(void); void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit); +extern u64 smccc_has_sve_hint; + /** * struct arm_smccc_res - Result from SMC/HVC call * @a0-a3 result values from registers 0 to 3 @@ -227,6 +232,61 @@ struct arm_smccc_res { unsigned long a3; }; +#ifdef CONFIG_ARM64 +/** + * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call + * @a0-a17 argument values from registers 0 to 17 + */ +struct arm_smccc_1_2_regs { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long a8; + unsigned long a9; + unsigned long a10; + unsigned long a11; + unsigned long a12; + unsigned long a13; + unsigned long a14; + unsigned long a15; + unsigned long a16; + unsigned long a17; +}; + +/** + * arm_smccc_1_2_hvc() - make HVC calls + * @args: arguments passed via struct arm_smccc_1_2_regs + * @res: result values via struct arm_smccc_1_2_regs + * + * This function is used to make HVC calls following SMC Calling Convention + * v1.2 or above. The content of the supplied param is copied from the + * structure to registers prior to the HVC instruction. The return values + * are updated with the content from registers on return from the HVC + * instruction. + */ +asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); + +/** + * arm_smccc_1_2_smc() - make SMC calls + * @args: arguments passed via struct arm_smccc_1_2_regs + * @res: result values via struct arm_smccc_1_2_regs + * + * This function is used to make SMC calls following SMC Calling Convention + * v1.2 or above. The content of the supplied param is copied from the + * structure to registers prior to the SMC instruction. The return values + * are updated with the content from registers on return from the SMC + * instruction.
+ */ +asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); +#endif + /** * struct arm_smccc_quirk - Contains quirk information * @id: quirk identification @@ -240,6 +300,15 @@ struct arm_smccc_quirk { } state; }; +/** + * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls + * + * Sets the SMCCC hint bit to indicate if there is live state in the SVE + * registers, this modifies x0 in place and should never be called from C + * code. + */ +asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0); + /** * __arm_smccc_smc() - make SMC calls * @a0-a7: arguments passed in registers 0 to 7 @@ -297,6 +366,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #endif +/* nVHE hypervisor doesn't have a current thread so needs separate checks */ +#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__) + +#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \ + ARM64_SVE) +#define smccc_sve_clobbers "x16", "x30", "cc", + +#else + +#define SMCCC_SVE_CHECK +#define smccc_sve_clobbers + +#endif + #define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x #define __count_args(...) \ @@ -364,7 +447,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define ___constraints(count) \ : __constraint_read_ ## count \ - : "memory" + : smccc_sve_clobbers "memory" #define __constraints(count) ___constraints(count) /* @@ -379,7 +462,8 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3"); \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile(inst "\n" : \ + asm volatile(SMCCC_SVE_CHECK \ + inst "\n" : \ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h new file mode 100644 index 0000000000..505c679b6a --- /dev/null +++ b/include/linux/arm_ffa.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 ARM Ltd. 
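The SMCCC v1.2 interface declared above carries up to 18 registers each way. A minimal call sketch (arm64 only; the function ID below is a placeholder, not a real service):

    #include <linux/arm-smccc.h>

    static unsigned long example_smccc_1_2_call(void)
    {
    	const struct arm_smccc_1_2_regs args = {
    		.a0 = 0xC4000001,	/* placeholder fast-call function ID */
    	};
    	struct arm_smccc_1_2_regs res;

    	arm_smccc_1_2_smc(&args, &res);
    	return res.a0;		/* primary return code per SMCCC v1.2 */
    }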
+ */ + +#ifndef _LINUX_ARM_FFA_H +#define _LINUX_ARM_FFA_H + +#include +#include +#include +#include + +/* FFA Bus/Device/Driver related */ +struct ffa_device { + int vm_id; + bool mode_32bit; + uuid_t uuid; + struct device dev; +}; + +#define to_ffa_dev(d) container_of(d, struct ffa_device, dev) + +struct ffa_device_id { + uuid_t uuid; +}; + +struct ffa_driver { + const char *name; + int (*probe)(struct ffa_device *sdev); + void (*remove)(struct ffa_device *sdev); + const struct ffa_device_id *id_table; + + struct device_driver driver; +}; + +#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver) + +static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data) +{ + fdev->dev.driver_data = data; +} + +#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT) +struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id); +void ffa_device_unregister(struct ffa_device *ffa_dev); +int ffa_driver_register(struct ffa_driver *driver, struct module *owner, + const char *mod_name); +void ffa_driver_unregister(struct ffa_driver *driver); +bool ffa_device_is_valid(struct ffa_device *ffa_dev); +const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev); + +#else +static inline +struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) +{ + return NULL; +} + +static inline void ffa_device_unregister(struct ffa_device *dev) {} + +static inline int +ffa_driver_register(struct ffa_driver *driver, struct module *owner, + const char *mod_name) +{ + return -EINVAL; +} + +static inline void ffa_driver_unregister(struct ffa_driver *driver) {} + +static inline +bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; } + +static inline +const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev) +{ + return NULL; +} +#endif /* CONFIG_ARM_FFA_TRANSPORT */ + +#define ffa_register(driver) \ + ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define ffa_unregister(driver) \ + ffa_driver_unregister(driver) + +/** + * module_ffa_driver() - Helper macro for registering a psa_ffa driver + * @__ffa_driver: ffa_driver structure + * + * Helper macro for psa_ffa drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. + */ +#define module_ffa_driver(__ffa_driver) \ + module_driver(__ffa_driver, ffa_register, ffa_unregister) + +/* FFA transport related */ +struct ffa_partition_info { + u16 id; + u16 exec_ctxt; +/* partition supports receipt of direct requests */ +#define FFA_PARTITION_DIRECT_RECV BIT(0) +/* partition can send direct requests. */ +#define FFA_PARTITION_DIRECT_SEND BIT(1) +/* partition can send and receive indirect messages. */ +#define FFA_PARTITION_INDIRECT_MSG BIT(2) + u32 properties; +}; + +/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */ +struct ffa_send_direct_data { + unsigned long data0; /* w3/x3 */ + unsigned long data1; /* w4/x4 */ + unsigned long data2; /* w5/x5 */ + unsigned long data3; /* w6/x6 */ + unsigned long data4; /* w7/x7 */ +}; + +struct ffa_mem_region_addr_range { + /* The base IPA of the constituent memory region, aligned to 4 kiB */ + u64 address; + /* The number of 4 kiB pages in the constituent memory region. */ + u32 pg_cnt; + u32 reserved; +}; + +struct ffa_composite_mem_region { + /* + * The total number of 4 kiB pages included in this memory region. 
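A skeleton client of the FF-A bus pieces defined earlier in this header might look as follows (names invented; a real driver would also supply an id_table for UUID matching):

    #include <linux/arm_ffa.h>
    #include <linux/module.h>

    static int example_ffa_probe(struct ffa_device *ffa_dev)
    {
    	const struct ffa_dev_ops *ops = ffa_dev_ops_get(ffa_dev);

    	return ops ? 0 : -ENODEV;
    }

    static void example_ffa_remove(struct ffa_device *ffa_dev)
    {
    }

    static struct ffa_driver example_ffa_driver = {
    	.name	= "example-ffa",
    	.probe	= example_ffa_probe,
    	.remove	= example_ffa_remove,
    };

    /* Expands to module init/exit calling ffa_register()/ffa_unregister(). */
    module_ffa_driver(example_ffa_driver);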
This + * must be equal to the sum of page counts specified in each + * `struct ffa_mem_region_addr_range`. + */ + u32 total_pg_cnt; + /* The number of constituents included in this memory region range */ + u32 addr_range_cnt; + u64 reserved; + /** An array of `addr_range_cnt` memory region constituents. */ + struct ffa_mem_region_addr_range constituents[]; +}; + +struct ffa_mem_region_attributes { + /* The ID of the VM to which the memory is being given or shared. */ + u16 receiver; + /* + * The permissions with which the memory region should be mapped in the + * receiver's page table. + */ +#define FFA_MEM_EXEC BIT(3) +#define FFA_MEM_NO_EXEC BIT(2) +#define FFA_MEM_RW BIT(1) +#define FFA_MEM_RO BIT(0) + u8 attrs; + /* + * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP + * for memory regions with multiple borrowers. + */ +#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0) + u8 flag; + u32 composite_off; + /* + * Offset in bytes from the start of the outer `ffa_memory_region` to + * a `struct ffa_mem_region_addr_range`. + */ + u64 reserved; +}; + +struct ffa_mem_region { + /* The ID of the VM/owner which originally sent the memory region */ + u16 sender_id; +#define FFA_MEM_NORMAL BIT(5) +#define FFA_MEM_DEVICE BIT(4) + +#define FFA_MEM_WRITE_BACK (3 << 2) +#define FFA_MEM_NON_CACHEABLE (1 << 2) + +#define FFA_DEV_nGnRnE (0 << 2) +#define FFA_DEV_nGnRE (1 << 2) +#define FFA_DEV_nGRE (2 << 2) +#define FFA_DEV_GRE (3 << 2) + +#define FFA_MEM_NON_SHAREABLE (0) +#define FFA_MEM_OUTER_SHAREABLE (2) +#define FFA_MEM_INNER_SHAREABLE (3) + u8 attributes; + u8 reserved_0; +/* + * Clear memory region contents after unmapping it from the sender and + * before mapping it for any receiver. + */ +#define FFA_MEM_CLEAR BIT(0) +/* + * Whether the hypervisor may time slice the memory sharing or retrieval + * operation. + */ +#define FFA_TIME_SLICE_ENABLE BIT(1) + +#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3) +#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3) +#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3) +#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3) + +#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9) +#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5) + /* Flags to control behaviour of the transaction. */ + u32 flags; +#define HANDLE_LOW_MASK GENMASK_ULL(31, 0) +#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32) +#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x)))) +#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x)))) + +#define PACK_HANDLE(l, h) \ + (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h))) + /* + * A globally-unique ID assigned by the hypervisor for a region + * of memory being sent between VMs. + */ + u64 handle; + /* + * An implementation defined value associated with the receiver and the + * memory region. + */ + u64 tag; + u32 reserved_1; + /* + * The number of `ffa_mem_region_attributes` entries included in this + * transaction. + */ + u32 ep_count; + /* + * An array of endpoint memory access descriptors. + * Each one specifies a memory region offset, an endpoint and the + * attributes with which this memory region should be mapped in that + * endpoint's page table.
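The HANDLE_* helpers just defined exist because the 64-bit global handle travels as two 32-bit register halves; a worked round trip:

    /* Split a 64-bit FF-A memory handle and reassemble it. */
    static inline bool example_handle_roundtrip(u64 handle)
    {
    	u32 lo = HANDLE_LOW(handle);
    	u32 hi = HANDLE_HIGH(handle);

    	return PACK_HANDLE(lo, hi) == handle;	/* holds for any handle */
    }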
+ */ + struct ffa_mem_region_attributes ep_mem_access[]; +}; + +#define COMPOSITE_OFFSET(x) \ + (offsetof(struct ffa_mem_region, ep_mem_access[x])) +#define CONSTITUENTS_OFFSET(x) \ + (offsetof(struct ffa_composite_mem_region, constituents[x])) +#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \ + (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) + +struct ffa_mem_ops_args { + bool use_txbuf; + u32 nattrs; + u32 flags; + u64 tag; + u64 g_handle; + struct scatterlist *sg; + struct ffa_mem_region_attributes *attrs; +}; + +struct ffa_dev_ops { + u32 (*api_version_get)(void); + int (*partition_info_get)(const char *uuid_str, + struct ffa_partition_info *buffer); + void (*mode_32bit_set)(struct ffa_device *dev); + int (*sync_send_receive)(struct ffa_device *dev, + struct ffa_send_direct_data *data); + int (*memory_reclaim)(u64 g_handle, u32 flags); + int (*memory_share)(struct ffa_device *dev, + struct ffa_mem_ops_args *args); +}; + +#endif /* _LINUX_ARM_FFA_H */ diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h index 4cc4020127..83ad775ad0 100644 --- a/include/linux/ascii85.h +++ b/include/linux/ascii85.h @@ -8,7 +8,8 @@ #ifndef _ASCII85_H_ #define _ASCII85_H_ -#include +#include +#include #define ASCII85_BUFSZ 6 diff --git a/include/linux/ata.h b/include/linux/ata.h index 6e67aded28..1b44f40c77 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -13,7 +13,7 @@ #ifndef __LINUX_ATA_H__ #define __LINUX_ATA_H__ -#include +#include #include #include #include diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 571a11008a..ed1d3ffd5b 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -77,12 +77,8 @@ __ret; \ }) -#ifdef ARCH_ATOMIC #include #include -#else -#include -#endif #include diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 8612f8fc86..db0e099c23 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -412,9 +412,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); * PF removes the filters and returns status. */ +/* VIRTCHNL_ETHER_ADDR_LEGACY + * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad + * bytes. Moving forward all VF drivers should not set type to + * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy + * behavior. The control plane function (i.e. PF) can use a best effort method + * of tracking the primary/device unicast in this case, but there is no + * guarantee and functionality depends on the implementation of the PF. + */ + +/* VIRTCHNL_ETHER_ADDR_PRIMARY + * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the + * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and + * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane + * function (i.e. PF) to accurately track and use this MAC address for + * displaying on the host and for VM/function reset. + */ + +/* VIRTCHNL_ETHER_ADDR_EXTRA + * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra + * unicast and/or multicast filters that are being added/deleted via + * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively. 
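Rounding out the FF-A ops table above: a direct-message round trip through sync_send_receive() might be sketched as below (payload value arbitrary; data0..data4 travel in registers w3/x3 to w7/x7):

    #include <linux/arm_ffa.h>

    static int example_ffa_ping(struct ffa_device *dev)
    {
    	const struct ffa_dev_ops *ops = ffa_dev_ops_get(dev);
    	struct ffa_send_direct_data data = { .data0 = 0xF00D };

    	if (!ops)
    		return -ENODEV;

    	return ops->sync_send_receive(dev, &data);
    }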
+ */ struct virtchnl_ether_addr { u8 addr[ETH_ALEN]; - u8 pad[2]; + u8 type; +#define VIRTCHNL_ETHER_ADDR_LEGACY 0 +#define VIRTCHNL_ETHER_ADDR_PRIMARY 1 +#define VIRTCHNL_ETHER_ADDR_EXTRA 2 +#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */ + u8 pad; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index fff9367a63..1d7edad991 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -154,6 +154,8 @@ struct bdi_writeback { struct cgroup_subsys_state *blkcg_css; /* and blkcg */ struct list_head memcg_node; /* anchored at memcg->cgwb_list */ struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */ + struct list_head b_attached; /* attached inodes, protected by list_lock */ + struct list_head offline_node; /* anchored at offline_cgwbs */ union { struct work_struct release_work; @@ -239,8 +241,9 @@ static inline void wb_get(struct bdi_writeback *wb) /** - * wb_put - decrement a wb's refcount + * wb_put_many - decrement a wb's refcount * @wb: bdi_writeback to put + * @nr: number of references to put */ -static inline void wb_put(struct bdi_writeback *wb) +static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr) { if (WARN_ON_ONCE(!wb->bdi)) { /* @@ -251,7 +254,16 @@ static inline void wb_put(struct bdi_writeback *wb) } if (wb != &wb->bdi->wb) - percpu_ref_put(&wb->refcnt); + percpu_ref_put_many(&wb->refcnt, nr); +} + +/** + * wb_put - decrement a wb's refcount + * @wb: bdi_writeback to put + */ +static inline void wb_put(struct bdi_writeback *wb) +{ + wb_put_many(wb, 1); } /** @@ -280,6 +292,10 @@ static inline void wb_put(struct bdi_writeback *wb) { } +static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr) +{ +} + static inline bool wb_dying(struct bdi_writeback *wb) { return false; diff --git a/include/linux/bio.h b/include/linux/bio.h index a0b4cfdf62..2203b686e1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -44,9 +44,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs) #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) -#define bio_multiple_segments(bio) \ - ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) - #define bvec_iter_sectors(iter) ((iter).bi_size >> 9) #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) @@ -271,7 +268,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) { - *bv = bio_iovec(bio); + *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); } static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) @@ -279,10 +276,9 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) struct bvec_iter iter = bio->bi_iter; int idx; - if (unlikely(!bio_multiple_segments(bio))) { - *bv = bio_iovec(bio); - return; - } + bio_get_first_bvec(bio, bv); + if (bv->bv_len == bio->bi_iter.bi_size) + return; /* this bio only has a single bvec */ bio_advance_iter(bio, &iter, iter.bi_size); @@ -822,4 +818,6 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } +struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); + #endif /* __LINUX_BIO_H */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index b9f3c246c3..3704843887 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -30,6
+30,8 @@ /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX +#define FC_APPID_LEN 129 + #ifdef CONFIG_BLK_CGROUP @@ -55,6 +57,9 @@ struct blkcg { struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; struct list_head all_blkcgs_node; +#ifdef CONFIG_BLK_CGROUP_FC_APPID + char fc_app_id[FC_APPID_LEN]; +#endif #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif @@ -660,4 +665,62 @@ static inline void blk_cgroup_bio_start(struct bio *bio) { } #endif /* CONFIG_BLOCK */ #endif /* CONFIG_BLK_CGROUP */ + +#ifdef CONFIG_BLK_CGROUP_FC_APPID +/** + * blkcg_set_fc_appid - set the fc_app_id field associated with a blkcg + * @app_id: application identifier + * @cgrp_id: cgroup id + * @app_id_len: size of application identifier + */ +static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len) +{ + struct cgroup *cgrp; + struct cgroup_subsys_state *css; + struct blkcg *blkcg; + int ret = 0; + + if (app_id_len > FC_APPID_LEN) + return -EINVAL; + + cgrp = cgroup_get_from_id(cgrp_id); + if (!cgrp) + return -ENOENT; + css = cgroup_get_e_css(cgrp, &io_cgrp_subsys); + if (!css) { + ret = -ENOENT; + goto out_cgrp_put; + } + blkcg = css_to_blkcg(css); + /* + * There is a slight race condition on setting the appid. + * Worst case an I/O may not find the right id. + * This is no different from the I/O we let pass while obtaining + * the vmid from the fabric. + * Adding the overhead of a lock is not necessary. + */ + strlcpy(blkcg->fc_app_id, app_id, app_id_len); + css_put(css); +out_cgrp_put: + cgroup_put(cgrp); + return ret; +} + +/** + * blkcg_get_fc_appid - get the fc app identifier associated with a bio + * @bio: target bio + * + * On success return the fc_app_id; on failure return NULL + */ +static inline char *blkcg_get_fc_appid(struct bio *bio) +{ + if (bio && bio->bi_blkg && + (bio->bi_blkg->blkcg->fc_app_id[0] != '\0')) + return bio->bi_blkg->blkcg->fc_app_id; + return NULL; +} +#else +static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; } +static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; } +#endif /* CONFIG_BLK_CGROUP_FC_APPID */ #endif /* _BLK_CGROUP_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 359486940f..1d18447ebe 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -426,19 +426,27 @@ enum { ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) -struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); -struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, +#define blk_mq_alloc_disk(set, queuedata) \ +({ \ + static struct lock_class_key __key; \ + struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata); \ + \ + if (!IS_ERR(__disk)) \ + lockdep_init_map(&__disk->lockdep_map, \ + "(bio completion)", &__key, 0); \ + __disk; \ +}) +struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata); -struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, - struct request_queue *q, - bool elevator_init); -struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, - const struct blk_mq_ops *ops, - unsigned int queue_depth, - unsigned int set_flags); +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q); void blk_mq_unregister_dev(struct device *, struct request_queue *); int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set
*set, + const struct blk_mq_ops *ops, unsigned int queue_depth, + unsigned int set_flags); void blk_mq_free_tag_set(struct blk_mq_tag_set *set); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index db026b6ec1..290f9061b2 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -29,7 +29,6 @@ struct block_device { int bd_openers; struct inode * bd_inode; /* will die */ struct super_block * bd_super; - struct mutex bd_mutex; /* open/close mutex */ void * bd_claiming; struct device bd_device; void * bd_holder; @@ -40,9 +39,6 @@ struct block_device { #endif struct kobject *bd_holder_dir; u8 bd_partno; - /* number of times partitions within this device have been opened. */ - unsigned bd_part_count; - spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ struct gendisk * bd_disk; struct backing_dev_info *bd_bdi; @@ -304,6 +300,7 @@ enum { BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ BIO_TRACKED, /* set if bio goes through the rq_qos path */ BIO_REMAPPED, + BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ BIO_FLAG_LAST }; @@ -354,9 +351,6 @@ enum req_opf { /* reset all the zone present on the device */ REQ_OP_ZONE_RESET_ALL = 17, - /* SCSI passthrough using struct scsi_request */ - REQ_OP_SCSI_IN = 32, - REQ_OP_SCSI_OUT = 33, /* Driver private requests */ REQ_OP_DRV_IN = 34, REQ_OP_DRV_OUT = 35, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f69c75bd6d..3177181c43 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -25,6 +25,7 @@ #include #include #include +#include struct module; struct scsi_ioctl_command; @@ -239,42 +240,15 @@ struct request { void *end_io_data; }; -static inline bool blk_op_is_scsi(unsigned int op) -{ - return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; -} - -static inline bool blk_op_is_private(unsigned int op) +static inline bool blk_op_is_passthrough(unsigned int op) { + op &= REQ_OP_MASK; return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; } -static inline bool blk_rq_is_scsi(struct request *rq) -{ - return blk_op_is_scsi(req_op(rq)); -} - -static inline bool blk_rq_is_private(struct request *rq) -{ - return blk_op_is_private(req_op(rq)); -} - static inline bool blk_rq_is_passthrough(struct request *rq) { - return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); -} - -static inline bool bio_is_passthrough(struct bio *bio) -{ - unsigned op = bio_op(bio); - - return blk_op_is_scsi(op) || blk_op_is_private(op); -} - -static inline bool blk_op_is_passthrough(unsigned int op) -{ - return (blk_op_is_scsi(op & REQ_OP_MASK) || - blk_op_is_private(op & REQ_OP_MASK)); + return blk_op_is_passthrough(req_op(rq)); } static inline unsigned short req_get_ioprio(struct request *req) @@ -493,6 +467,9 @@ struct request_queue { atomic_t nr_active_requests_shared_sbitmap; + struct sbitmap_queue sched_bitmap_tags; + struct sbitmap_queue sched_breserved_tags; + struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); @@ -932,10 +909,12 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns extern int blk_rq_map_user_iov(struct request_queue *, struct request *, struct rq_map_data *, const struct iov_iter *, gfp_t); -extern void blk_execute_rq(struct gendisk *, struct request *, int); extern void blk_execute_rq_nowait(struct gendisk *, struct request *, int, rq_end_io_fn *); +blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, + int 
at_head); + /* Helper to convert REQ_OP_XXX to its string format XXX */ extern const char *blk_op_str(unsigned int op); @@ -1008,6 +987,18 @@ static inline unsigned int blk_rq_stats_sectors(const struct request *rq) /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); +static inline unsigned int bio_zone_no(struct bio *bio) +{ + return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev), + bio->bi_iter.bi_sector); +} + +static inline unsigned int bio_zone_is_seq(struct bio *bio) +{ + return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), + bio->bi_iter.bi_sector); +} + static inline unsigned int blk_rq_zone_no(struct request *rq) { return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); @@ -1209,7 +1200,6 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, extern void blk_dump_rq_flags(struct request *, char *); bool __must_check blk_get_queue(struct request_queue *); -struct request_queue *blk_alloc_queue(int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 2696eb0fc1..abe089c275 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -16,6 +16,26 @@ #define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT) #define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1) +/** + * xbc_calc_checksum() - Calculate checksum of bootconfig + * @data: Bootconfig data. + * @size: The size of the bootconfig data. + * + * Calculate the checksum value of the bootconfig data. + * The checksum will be used with the BOOTCONFIG_MAGIC and the size for + * embedding the bootconfig in the initrd image. + */ +static inline __init u32 xbc_calc_checksum(void *data, u32 size) +{ + unsigned char *p = data; + u32 ret = 0; + + while (size--) + ret += *p++; + + return ret; +} + /* XBC tree node */ struct xbc_node { u16 next; @@ -71,7 +91,7 @@ static inline __init bool xbc_node_is_key(struct xbc_node *node) */ static inline __init bool xbc_node_is_array(struct xbc_node *node) { - return xbc_node_is_value(node) && node->next != 0; + return xbc_node_is_value(node) && node->child != 0; } /** @@ -80,6 +100,8 @@ static inline __init bool xbc_node_is_array(struct xbc_node *node) * * Test the @node is a leaf key node which is a key node and has a value node * or no child. Returns true if it is a leaf node, or false if not. + * Note that the leaf node can have subkey nodes in addition to the + * value node. */ static inline __init bool xbc_node_is_leaf(struct xbc_node *node) { @@ -129,6 +151,23 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) return xbc_node_find_child(NULL, key); } +/** + * xbc_node_get_subkey() - Return the first subkey node if exists + * @node: Parent node + * + * Return the first subkey node of the @node. If the @node has no child + * or only value node, this will return NULL. 
+ */ +static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node) +{ + struct xbc_node *child = xbc_node_get_child(node); + + if (child && xbc_node_is_value(child)) + return xbc_node_get_next(child); + else + return child; +} + /** * xbc_array_for_each_value() - Iterate value nodes on an array * @anode: An XBC arraied value node @@ -140,7 +179,7 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) */ #define xbc_array_for_each_value(anode, value) \ for (value = xbc_node_get_data(anode); anode != NULL ; \ - anode = xbc_node_get_next(anode), \ + anode = xbc_node_get_child(anode), \ value = anode ? xbc_node_get_data(anode) : NULL) /** @@ -149,11 +188,24 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) * @child: Iterated XBC node. * * Iterate child nodes of @parent. Each child nodes are stored to @child. + * The @child can be mixture of a value node and subkey nodes. */ #define xbc_node_for_each_child(parent, child) \ for (child = xbc_node_get_child(parent); child != NULL ; \ child = xbc_node_get_next(child)) +/** + * xbc_node_for_each_subkey() - Iterate child subkey nodes + * @parent: An XBC node. + * @child: Iterated XBC node. + * + * Iterate subkey nodes of @parent. Each child nodes are stored to @child. + * The @child is only the subkey node. + */ +#define xbc_node_for_each_subkey(parent, child) \ + for (child = xbc_node_get_subkey(parent); child != NULL ; \ + child = xbc_node_get_next(child)) + /** * xbc_node_for_each_array_value() - Iterate array entries of geven key * @node: An XBC node. @@ -162,16 +214,16 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) * @value: Iterated value of array entry. * * Iterate array entries of given @key under @node. Each array entry node - * is stroed to @anode and @value. If the @node doesn't have @key node, + * is stored to @anode and @value. If the @node doesn't have @key node, * it does nothing. * Note that even if the found key node has only one value (not array) - * this executes block once. Hoever, if the found key node has no value + * this executes block once. However, if the found key node has no value * (key-only node), this does nothing. So don't use this for testing the * key-value pair existence. */ #define xbc_node_for_each_array_value(node, key, anode, value) \ for (value = xbc_node_find_value(node, key, &anode); value != NULL; \ - anode = xbc_node_get_next(anode), \ + anode = xbc_node_get_child(anode), \ value = anode ? xbc_node_get_data(anode) : NULL) /** diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h new file mode 100644 index 0000000000..2bc8b1f69c --- /dev/null +++ b/include/linux/bootmem_info.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BOOTMEM_INFO_H +#define __LINUX_BOOTMEM_INFO_H + +#include + +/* + * Types for free bootmem stored in page->lru.next. These have to be in + * some random range in unsigned long space for debugging purposes. + */ +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; + +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE +void __init register_page_bootmem_info_node(struct pglist_data *pgdat); + +void get_page_bootmem(unsigned long info, struct page *page, + unsigned long type); +void put_page_bootmem(struct page *page); + +/* + * Any memory allocated via the memblock allocator and not via the + * buddy will be marked reserved already in the memmap. 
For those + * pages, we can call this function to free it to buddy allocator. + */ +static inline void free_bootmem_page(struct page *page) +{ + unsigned long magic = (unsigned long)page->freelist; + + /* + * The reserve_bootmem_region sets the reserved flag on bootmem + * pages. + */ + VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); + + if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) + put_page_bootmem(page); + else + VM_BUG_ON_PAGE(1, page); +} +#else +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} + +static inline void put_page_bootmem(struct page *page) +{ +} + +static inline void get_page_bootmem(unsigned long info, struct page *page, + unsigned long type) +{ +} + +static inline void free_bootmem_page(struct page *page) +{ + free_reserved_page(page); +} +#endif + +#endif /* __LINUX_BOOTMEM_INFO_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 02b02cb29c..e8e2b0393c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -22,6 +22,7 @@ #include #include #include +#include struct bpf_verifier_env; struct bpf_verifier_log; @@ -69,6 +70,8 @@ struct bpf_map_ops { void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); + int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, + void *value, u64 flags); int (*map_lookup_and_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -777,6 +780,7 @@ struct bpf_jit_poke_descriptor { void *tailcall_target; void *tailcall_bypass; void *bypass_addr; + void *aux; union { struct { struct bpf_map *map; @@ -1428,7 +1432,7 @@ struct bpf_iter__bpf_map_elem { int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); bool bpf_iter_prog_supported(struct bpf_prog *prog); -int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); int bpf_iter_new_fd(struct bpf_link *link); bool bpf_link_is_iter(struct bpf_link *link); struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); @@ -1459,7 +1463,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); int bpf_get_file_flag(int flags); -int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, +int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, size_t actual_size); /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and @@ -1479,8 +1483,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) } /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, - union bpf_attr __user *uattr); +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr); #ifndef CONFIG_BPF_JIT_ALWAYS_ON void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); @@ -1499,8 +1502,13 @@ int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, struct net_device *dev_rx); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); +int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, + struct bpf_map *map, bool exclude_ingress); int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog 
*xdp_prog); +int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, + struct bpf_prog *xdp_prog, struct bpf_map *map, + bool exclude_ingress); bool dev_map_can_have_prog(struct bpf_map *map); void __cpu_map_flush(void); @@ -1668,6 +1676,13 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, return 0; } +static inline +int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, + struct bpf_map *map, bool exclude_ingress) +{ + return 0; +} + struct sk_buff; static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, @@ -1677,6 +1692,14 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, return 0; } +static inline +int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, + struct bpf_prog *xdp_prog, struct bpf_map *map, + bool exclude_ingress) +{ + return 0; +} + static inline void __cpu_map_flush(void) { } @@ -1826,6 +1849,9 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); +int bpf_prog_test_run_syscall(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1851,6 +1877,13 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } + +static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) @@ -1964,6 +1997,7 @@ extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; extern const struct bpf_func_proto bpf_task_storage_get_proto; extern const struct bpf_func_proto bpf_task_storage_delete_proto; extern const struct bpf_func_proto bpf_for_each_map_elem_proto; +extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); @@ -2015,6 +2049,7 @@ struct sk_reuseport_kern { struct sk_buff *skb; struct sock *sk; struct sock *selected_sk; + struct sock *migrating_sk; void *data_end; u32 hash; u32 reuseport_id; diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index b902c580c4..24496bc28e 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -58,7 +58,7 @@ struct bpf_local_storage_data { * from the object's bpf_local_storage. * * Put it in the same cacheline as the data to minimize - * the number of cachelines access during the cache hit case. + * the number of cachelines accessed during the cache hit case. */ struct bpf_local_storage_map __rcu *smap; u8 data[] __aligned(8); @@ -71,7 +71,7 @@ struct bpf_local_storage_elem { struct bpf_local_storage __rcu *local_storage; struct rcu_head rcu; /* 8 bytes hole */ - /* The data is stored in aother cacheline to minimize + /* The data is stored in another cacheline to minimize * the number of cachelines access during a cache hit. 
*/ struct bpf_local_storage_data sdata ____cacheline_aligned; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index f883f01a50..a9db1eae67 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -77,6 +77,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, void *, void *) #endif /* CONFIG_BPF_LSM */ #endif +BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall, + void *, void *) BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 06841517ab..e774ecc1cd 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -215,6 +215,13 @@ struct bpf_idx_pair { u32 idx; }; +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +/* Maximum number of register states that can exist at once */ +#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ @@ -418,6 +425,7 @@ struct bpf_verifier_env { const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; struct { int *insn_state; int *insn_stack; @@ -442,6 +450,7 @@ struct bpf_verifier_env { u32 peak_states; /* longest register parentage chain walked for liveness marking */ u32 longest_mark_read_walk; + bpfptr_t fd_array; }; __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h new file mode 100644 index 0000000000..5cdeab497c --- /dev/null +++ b/include/linux/bpfptr.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* A pointer that can point to either kernel or userspace memory. 
*/ +#ifndef _LINUX_BPFPTR_H +#define _LINUX_BPFPTR_H + +#include + +typedef sockptr_t bpfptr_t; + +static inline bool bpfptr_is_kernel(bpfptr_t bpfptr) +{ + return bpfptr.is_kernel; +} + +static inline bpfptr_t KERNEL_BPFPTR(void *p) +{ + return (bpfptr_t) { .kernel = p, .is_kernel = true }; +} + +static inline bpfptr_t USER_BPFPTR(void __user *p) +{ + return (bpfptr_t) { .user = p }; +} + +static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel) +{ + if (is_kernel) + return KERNEL_BPFPTR((void*) (uintptr_t) addr); + else + return USER_BPFPTR(u64_to_user_ptr(addr)); +} + +static inline bool bpfptr_is_null(bpfptr_t bpfptr) +{ + if (bpfptr_is_kernel(bpfptr)) + return !bpfptr.kernel; + return !bpfptr.user; +} + +static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val) +{ + if (bpfptr_is_kernel(*bpfptr)) + bpfptr->kernel += val; + else + bpfptr->user += val; +} + +static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src, + size_t offset, size_t size) +{ + return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size); +} + +static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size) +{ + return copy_from_bpfptr_offset(dst, src, 0, size); +} + +static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset, + const void *src, size_t size) +{ + return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); +} + +static inline void *memdup_bpfptr(bpfptr_t src, size_t len) +{ + return memdup_sockptr((sockptr_t) src, len); +} + +static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) +{ + return strncpy_from_sockptr(dst, (sockptr_t) src, count); +} + +#endif /* _LINUX_BPFPTR_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 3bac66e018..94a0c976c9 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -21,7 +21,7 @@ extern const struct file_operations btf_fops; void btf_get(struct btf *btf); void btf_put(struct btf *btf); -int btf_new_fd(const union bpf_attr *attr); +int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, const union bpf_attr *attr, diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 40232f90db..3b7a0ff464 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -8,5 +8,13 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); + +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) +extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX]; +void init_vmlinux_build_id(void); +#else +static inline void init_vmlinux_build_id(void) { } +#endif #endif diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index fb8f6d2cd1..e1c705fdfa 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -71,6 +71,9 @@ enum { /* Cgroup is frozen. */ CGRP_FROZEN, + + /* Control group has to be killed. 
*/ + CGRP_KILL, }; /* cgroup_root->flags */ @@ -110,6 +113,7 @@ enum { CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ + CFTYPE_PRESSURE = (1 << 6), /* only if pressure feature is enabled */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4f2f79de08..7bf60454a3 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -32,7 +32,7 @@ struct kernel_clone_args; #ifdef CONFIG_CGROUPS /* - * All weight knobs on the default hierarhcy should use the following min, + * All weight knobs on the default hierarchy should use the following min, * default and max values. The default value is the logarithmic center of * MIN and MAX and allows 100x to be expressed in both directions. */ @@ -676,6 +676,8 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) return &cgrp->psi; } +bool cgroup_psi_enabled(void); + static inline void cgroup_init_kthreadd(void) { /* @@ -696,6 +698,7 @@ static inline void cgroup_kthread_ready(void) } void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen); +struct cgroup *cgroup_get_from_id(u64 id); #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -735,6 +738,11 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) return NULL; } +static inline bool cgroup_psi_enabled(void) +{ + return false; +} + static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) { @@ -743,6 +751,11 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task, static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) {} + +static inline struct cgroup *cgroup_get_from_id(u64 id) +{ + return NULL; +} #endif /* !CONFIG_CGROUPS */ #ifdef CONFIG_CGROUPS @@ -906,20 +919,6 @@ void cgroup_freeze(struct cgroup *cgrp, bool freeze); void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src, struct cgroup *dst); -static inline bool cgroup_task_freeze(struct task_struct *task) -{ - bool ret; - - if (task->flags & PF_KTHREAD) - return false; - - rcu_read_lock(); - ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags); - rcu_read_unlock(); - - return ret; -} - static inline bool cgroup_task_frozen(struct task_struct *task) { return task->frozen; @@ -929,10 +928,6 @@ static inline bool cgroup_task_frozen(struct task_struct *task) static inline void cgroup_enter_frozen(void) { } static inline void cgroup_leave_frozen(bool always_leave) { } -static inline bool cgroup_task_freeze(struct task_struct *task) -{ - return false; -} static inline bool cgroup_task_frozen(struct task_struct *task) { return false; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 162a2e5546..d83b829305 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -629,6 +629,12 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, unsigned long rate, unsigned long *prate, const struct clk_div_table *table, u8 width, unsigned long flags, unsigned int val); +int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, + const struct clk_div_table *table, u8 width, + unsigned long flags); +int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, + const struct clk_div_table *table, u8 width, 
+ unsigned long flags, unsigned int val); int divider_get_val(unsigned long rate, unsigned long parent_rate, const struct clk_div_table *table, u8 width, unsigned long flags); diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index f7ff722a03..d128ad1570 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -123,20 +123,6 @@ static inline void tegra_cpu_clock_resume(void) } #endif -extern int tegra210_plle_hw_sequence_start(void); -extern bool tegra210_plle_hw_sequence_is_enabled(void); -extern void tegra210_xusb_pll_hw_control_enable(void); -extern void tegra210_xusb_pll_hw_sequence_start(void); -extern void tegra210_sata_pll_hw_control_enable(void); -extern void tegra210_sata_pll_hw_sequence_start(void); -extern void tegra210_set_sata_pll_seq_sw(bool state); -extern void tegra210_put_utmipll_in_iddq(void); -extern void tegra210_put_utmipll_out_iddq(void); -extern int tegra210_clk_handle_mbist_war(unsigned int id); -extern void tegra210_clk_emc_dll_enable(bool flag); -extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); -extern void tegra210_clk_emc_update_setting(u32 emc_src_value); - struct clk; struct tegra_emc; @@ -144,17 +130,10 @@ typedef long (tegra20_clk_emc_round_cb)(unsigned long rate, unsigned long min_rate, unsigned long max_rate, void *arg); - -void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, - void *cb_arg); -int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); - typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc, unsigned long rate); typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc, unsigned long rate); -void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, - tegra124_emc_complete_timing_change_cb *complete_cb); struct tegra210_clk_emc_config { unsigned long rate; @@ -176,8 +155,87 @@ struct tegra210_clk_emc_provider { const struct tegra210_clk_emc_config *config); }; +#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC) +void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, + void *cb_arg); +int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +#else +static inline void +tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, + void *cb_arg) +{ +} + +static inline int +tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same) +{ + return 0; +} +#endif + +#ifdef CONFIG_TEGRA124_CLK_EMC +void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, + tegra124_emc_complete_timing_change_cb *complete_cb); +#else +static inline void +tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, + tegra124_emc_complete_timing_change_cb *complete_cb) +{ +} +#endif + +#ifdef CONFIG_ARCH_TEGRA_210_SOC +int tegra210_plle_hw_sequence_start(void); +bool tegra210_plle_hw_sequence_is_enabled(void); +void tegra210_xusb_pll_hw_control_enable(void); +void tegra210_xusb_pll_hw_sequence_start(void); +void tegra210_sata_pll_hw_control_enable(void); +void tegra210_sata_pll_hw_sequence_start(void); +void tegra210_set_sata_pll_seq_sw(bool state); +void tegra210_put_utmipll_in_iddq(void); +void tegra210_put_utmipll_out_iddq(void); +int tegra210_clk_handle_mbist_war(unsigned int id); +void tegra210_clk_emc_dll_enable(bool flag); +void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); +void tegra210_clk_emc_update_setting(u32 emc_src_value); + int tegra210_clk_emc_attach(struct 
clk *clk, struct tegra210_clk_emc_provider *provider); void tegra210_clk_emc_detach(struct clk *clk); +#else +static inline int tegra210_plle_hw_sequence_start(void) +{ + return 0; +} + +static inline bool tegra210_plle_hw_sequence_is_enabled(void) +{ + return false; +} + +static inline int tegra210_clk_handle_mbist_war(unsigned int id) +{ + return 0; +} + +static inline int +tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider) +{ + return 0; +} + +static inline void tegra210_xusb_pll_hw_control_enable(void) {} +static inline void tegra210_xusb_pll_hw_sequence_start(void) {} +static inline void tegra210_sata_pll_hw_control_enable(void) {} +static inline void tegra210_sata_pll_hw_sequence_start(void) {} +static inline void tegra210_set_sata_pll_seq_sw(bool state) {} +static inline void tegra210_put_utmipll_in_iddq(void) {} +static inline void tegra210_put_utmipll_out_iddq(void) {} +static inline void tegra210_clk_emc_dll_enable(bool flag) {} +static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {} +static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {} +static inline void tegra210_clk_emc_detach(struct clk *clk) {} +#endif #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index c62f6fa676..3486f20a37 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -63,6 +63,17 @@ struct clk_omap_reg { * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs + * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading + * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency + * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg + * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg + * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg + * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in + * @control_reg + * @ssc_modfreq: the DPLL SSC frequency modulation in kHz + * @ssc_deltam: the DPLL SSC frequency spreading in permille (10th of percent) + * @ssc_downspread: require the only low frequency spread of the DPLL in SSC + * mode * @flags: DPLL type/features (see below) * * Possible values for @flags: @@ -110,6 +121,17 @@ struct dpll_data { u8 auto_recal_bit; u8 recal_en_bit; u8 recal_st_bit; + struct clk_omap_reg ssc_deltam_reg; + struct clk_omap_reg ssc_modfreq_reg; + u32 ssc_deltam_int_mask; + u32 ssc_deltam_frac_mask; + u32 ssc_modfreq_mant_mask; + u32 ssc_modfreq_exp_mask; + u32 ssc_enable_mask; + u32 ssc_downspread_mask; + u32 ssc_modfreq; + u32 ssc_deltam; + bool ssc_downspread; u8 flags; }; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index fd06b2780a..8a8423eb8e 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -30,11 +30,6 @@ struct clk_lookup { .clk = c, \ } -struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, - const char *dev_fmt, ...) __printf(3, 4); -struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, - const char *dev_fmt, ...) 
__printf(3, 4); - void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index d6ab416ee2..1d42d4b173 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -43,6 +43,8 @@ struct module; * @shift: Cycle to nanosecond divisor (power of two) * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs) * @maxadj: Maximum adjustment value to mult (~11%) + * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second. + * Zero says to use default WATCHDOG_THRESHOLD. * @archdata: Optional arch-specific data * @max_cycles: Maximum safe cycle value which won't overflow on * multiplication @@ -98,6 +100,7 @@ struct clocksource { u32 shift; u64 max_idle_ns; u32 maxadj; + u32 uncertainty_margin; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA struct arch_clocksource_data archdata; #endif @@ -137,7 +140,7 @@ struct clocksource { #define CLOCK_SOURCE_UNSTABLE 0x40 #define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80 #define CLOCK_SOURCE_RESELECT 0x100 - +#define CLOCK_SOURCE_VERIFY_PERCPU 0x200 /* simplify initialization of mask field */ #define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0) @@ -288,4 +291,7 @@ static inline void timer_probe(void) {} #define TIMER_ACPI_DECLARE(name, table_id, fn) \ ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn) +extern ulong max_cswd_read_retries; +void clocksource_verify_percpu(struct clocksource *cs); + #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 4221888bdc..c24098c7ac 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -35,12 +35,12 @@ enum compact_result { COMPACT_CONTINUE, /* - * The full zone was compacted scanned but wasn't successfull to compact + * The full zone was compacted scanned but wasn't successful to compact * suitable pages. */ COMPACT_COMPLETE, /* - * direct compaction has scanned part of the zone but wasn't successfull + * direct compaction has scanned part of the zone but wasn't successful * to compact suitable pages. */ COMPACT_PARTIAL_SKIPPED, diff --git a/include/linux/compat.h b/include/linux/compat.h index 8855b1b702..c270124e44 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -532,8 +532,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); &__uss->ss_sp, label); \ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ - if (t->sas_ss_flags & SS_AUTODISARM) \ - sas_ss_reset(t); \ } while (0); /* diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index adbe76b203..49b0ac8b6f 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -13,6 +13,12 @@ /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 +/* + * Note: Checking __has_feature(*_sanitizer) is only true if the feature is + * enabled. Therefore it is not required to additionally check defined(CONFIG_*) + * to avoid adding redundant attributes in other configurations. + */ + #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) /* Emulate GCC's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ @@ -45,6 +51,17 @@ #define __no_sanitize_undefined #endif +/* + * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together + * with no_sanitize("coverage"). 
Prior versions of Clang support coverage + * instrumentation, but cannot be queried for support by the preprocessor. + */ +#if __has_feature(coverage_sanitizer) +#define __no_sanitize_coverage __attribute__((no_sanitize("coverage"))) +#else +#define __no_sanitize_coverage +#endif + /* * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5d97ef738a..cb9217fc60 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -122,6 +122,12 @@ #define __no_sanitize_undefined #endif +#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__) +#define __no_sanitize_coverage __attribute__((no_sanitize_coverage)) +#else +#define __no_sanitize_coverage +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index df5b405e63..b67261a1e3 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -115,18 +115,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * The __COUNTER__ based labels are a hack to make each instance of the macros * unique, to convince GCC not to merge duplicate inline asm statements. */ -#define annotate_reachable() ({ \ - asm volatile("%c0:\n\t" \ +#define __stringify_label(n) #n + +#define __annotate_reachable(c) ({ \ + asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.reachable\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify_label(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) -#define annotate_unreachable() ({ \ - asm volatile("%c0:\n\t" \ +#define annotate_reachable() __annotate_reachable(__COUNTER__) + +#define __annotate_unreachable(c) ({ \ + asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.unreachable\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify_label(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) +#define annotate_unreachable() __annotate_unreachable(__COUNTER__) + #define ASM_UNREACHABLE \ "999:\n\t" \ ".pushsection .discard.unreachable\n\t" \ @@ -213,6 +219,16 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, __v; \ }) +/* + * With CONFIG_CFI_CLANG, the compiler replaces function addresses in + * instrumented C code with jump table addresses. Architectures that + * support CFI can define this macro to return the actual function address + * when needed. 
+ */ +#ifndef function_nocfi +#define function_nocfi(x) (x) +#endif + #endif /* __KERNEL__ */ /* diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 183ddd5fd0..2487be0e71 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -27,15 +27,16 @@ */ #ifndef __has_attribute # define __has_attribute(x) __GCC4_has_attribute_##x -# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___assume_aligned__ 1 # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 +# define __GCC4_has_attribute___no_profile_instrument_function__ 0 # define __GCC4_has_attribute___nonstring__ 0 -# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) -# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___no_sanitize_address__ 1 +# define __GCC4_has_attribute___no_sanitize_undefined__ 1 # define __GCC4_has_attribute___fallthrough__ 0 #endif @@ -238,6 +239,18 @@ # define __nonstring #endif +/* + * Optional: only supported since GCC >= 7.1, clang >= 13.0. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function + */ +#if __has_attribute(__no_profile_instrument_function__) +# define __no_profile __attribute__((__no_profile_instrument_function__)) +#else +# define __no_profile +#endif + /* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index d29bda7f6e..e4ea86fc58 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -210,7 +210,7 @@ struct ftrace_likely_data { /* Section for code which can't be instrumented at all */ #define noinstr \ noinline notrace __attribute((__section__(".noinstr.text"))) \ - __no_kcsan __no_sanitize_address + __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage #endif /* __KERNEL__ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 353969c7ac..9fd719475f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -330,15 +330,6 @@ struct cpufreq_driver { unsigned long target_perf, unsigned long capacity); - /* - * Caches and returns the lowest driver-supported frequency greater than - * or equal to the target frequency, subject to any driver limitations. - * Does not set the frequency. Only to be implemented for drivers with - * target(). - */ - unsigned int (*resolve_freq)(struct cpufreq_policy *policy, - unsigned int target_freq); - /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. 
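As a brief illustration of the function_nocfi() macro added in the compiler.h hunk above (a minimal, hypothetical sketch, not part of this patch; the function names are made up): with CONFIG_CFI_CLANG, taking a function's address in C yields the address of a CFI jump-table entry, so code that hands an entry point to firmware or hardware must fetch the real address instead:

static void cpu_restart_fn(void)
{
	/* ...hypothetical low-level entry point the firmware jumps to... */
}

static unsigned long cpu_restart_entry(void)
{
	/* function_nocfi() strips the CFI jump-table indirection */
	return (unsigned long)function_nocfi(cpu_restart_fn);
}

On configurations without CFI the macro is the identity, as defined above, so callers can use it unconditionally.
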
@@ -371,7 +362,6 @@ struct cpufreq_driver { int (*online)(struct cpufreq_policy *policy); int (*offline)(struct cpufreq_policy *policy); int (*exit)(struct cpufreq_policy *policy); - void (*stop_cpu)(struct cpufreq_policy *policy); int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 4a62b39806..f39b34b138 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -54,7 +54,7 @@ enum cpuhp_state { CPUHP_MM_MEMCQ_DEAD, CPUHP_PERCPU_CNT_DEAD, CPUHP_RADIX_DEAD, - CPUHP_PAGE_ALLOC_DEAD, + CPUHP_PAGE_ALLOC, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_IOVA_DEAD, @@ -171,7 +171,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, - CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index bfc4690de4..f3689a52bf 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -259,7 +259,7 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool /** * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location * @cpu: the (optionally unsigned) integer iterator - * @mask: the cpumask poiter + * @mask: the cpumask pointer * @start: the start location * * The implementation does not assume any bit in @mask is set (including @start). diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 206bde8308..de62a72243 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -38,8 +38,12 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) -#define VMCOREINFO_BUILD_ID(value) \ - vmcoreinfo_append_str("BUILD-ID=%s\n", value) +#define VMCOREINFO_BUILD_ID() \ + ({ \ + static_assert(sizeof(vmlinux_build_id) == 20); \ + vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \ + }) + #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -69,10 +73,6 @@ extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; -/* raw contents of kernel .notes section */ -extern const void __start_notes __weak; -extern const void __stop_notes __weak; - Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); diff --git a/include/linux/cred.h b/include/linux/cred.h index 14971322e1..fcbc6885cc 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -143,6 +143,7 @@ struct cred { #endif struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. 
*/ + struct ucounts *ucounts; struct group_info *group_info; /* supplementary groups for euid/fsgid */ /* RCU deletion */ union { @@ -169,6 +170,7 @@ extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); +extern int set_cred_ucounts(struct cred *); /* * check for validity of credentials @@ -369,6 +371,7 @@ static inline void put_cred(const struct cred *_cred) #define task_uid(task) (task_cred_xxx((task), uid)) #define task_euid(task) (task_cred_xxx((task), euid)) +#define task_ucounts(task) (task_cred_xxx((task), ucounts)) #define current_cred_xxx(xxx) \ ({ \ @@ -385,6 +388,7 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) +#define current_ucounts() (current_cred_xxx(ucounts)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS diff --git a/include/linux/crypto.h b/include/linux/crypto.h index da5e0d74bb..855869e1fd 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -643,32 +643,6 @@ struct crypto_comp { struct crypto_tfm base; }; -enum { - CRYPTOA_UNSPEC, - CRYPTOA_ALG, - CRYPTOA_TYPE, - CRYPTOA_U32, - __CRYPTOA_MAX, -}; - -#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) - -/* Maximum number of (rtattr) parameters for each template. */ -#define CRYPTO_MAX_ATTRS 32 - -struct crypto_attr_alg { - char name[CRYPTO_MAX_ALG_NAME]; -}; - -struct crypto_attr_type { - u32 type; - u32 mask; -}; - -struct crypto_attr_u32 { - u32 num; -}; - /* * Transform user interface. */ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 1fdb4343af..c869f1e73d 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -112,8 +112,8 @@ void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value); void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value); -struct dentry *debugfs_create_ulong(const char *name, umode_t mode, - struct dentry *parent, unsigned long *value); +void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent, + unsigned long *value); void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value); void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, @@ -126,8 +126,8 @@ void debugfs_create_size_t(const char *name, umode_t mode, struct dentry *parent, size_t *value); void debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value); -struct dentry *debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, bool *value); +void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, + bool *value); void debugfs_create_str(const char *name, umode_t mode, struct dentry *parent, char **value); @@ -266,13 +266,9 @@ static inline void debugfs_create_u32(const char *name, umode_t mode, static inline void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { } -static inline struct dentry *debugfs_create_ulong(const char *name, - umode_t mode, - struct dentry *parent, - unsigned long *value) -{ - return ERR_PTR(-ENODEV); -} +static inline void debugfs_create_ulong(const char *name, umode_t mode, + struct dentry *parent, + unsigned long *value) { } static inline void 
debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value) { } @@ -295,12 +291,8 @@ static inline void debugfs_create_atomic_t(const char *name, umode_t mode, atomic_t *value) { } -static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, - bool *value) -{ - return ERR_PTR(-ENODEV); -} +static inline void debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, bool *value) { } static inline void debugfs_create_str(const char *name, umode_t mode, struct dentry *parent, diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 8d2dde23e9..32444686b6 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -18,7 +18,7 @@ enum debug_obj_state { struct debug_obj_descr; /** - * struct debug_obj - representaion of an tracked object + * struct debug_obj - representation of a tracked object * @node: hlist node to link the object into the tracker list * @state: tracked object state * @astate: current active state diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 21651f9467..af7e6eb502 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -58,16 +58,22 @@ struct task_delay_info { #include #include +#include #ifdef CONFIG_TASK_DELAY_ACCT +DECLARE_STATIC_KEY_FALSE(delayacct_key); extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); + +extern int sysctl_delayacct(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); + extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void __delayacct_blkio_start(void); extern void __delayacct_blkio_end(struct task_struct *); -extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); +extern int delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); @@ -114,6 +120,9 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) static inline void delayacct_blkio_start(void) { + if (!static_branch_unlikely(&delayacct_key)) + return; + delayacct_set_flag(current, DELAYACCT_PF_BLKIO); if (current->delays) __delayacct_blkio_start(); @@ -121,19 +130,14 @@ static inline void delayacct_blkio_start(void) static inline void delayacct_blkio_end(struct task_struct *p) { + if (!static_branch_unlikely(&delayacct_key)) + return; + if (p->delays) __delayacct_blkio_end(p); delayacct_clear_flag(p, DELAYACCT_PF_BLKIO); } -static inline int delayacct_add_tsk(struct taskstats *d, - struct task_struct *tsk) -{ - if (!delayacct_on || !tsk->delays) - return 0; - return __delayacct_add_tsk(d, tsk); -} - static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { if (tsk->delays) diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index 6f009559ee..82d3d46005 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -236,7 +236,7 @@ do { \ * using WARN/WARN_ONCE to include file/line information and a backtrace. */ #define dev_WARN(dev, format, arg...) \ - WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); + WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg) #define dev_WARN_ONCE(dev, condition, format, arg...)
#define dev_WARN_ONCE(dev, condition, format, arg...) \ WARN_ONCE(condition, "%s %s: " format, \ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ff700fb6ce..7457d49acf 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -361,6 +361,12 @@ struct dm_target { * Set if we need to limit the number of in-flight bios when swapping. */ bool limit_swap_bios:1; + + /* + * Set if this target implements a zoned device and needs emulation of + * zone append operations using regular writes. + */ + bool emulate_zone_append:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -478,7 +484,8 @@ struct dm_report_zones_args { /* must be filled by ->report_zones before calling dm_report_zones_cb */ sector_t start; }; -int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data); +int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector, + struct dm_report_zones_args *args, unsigned int nr_zones); #endif /* CONFIG_BLK_DEV_ZONED */ /* diff --git a/include/linux/device.h b/include/linux/device.h index f1a00040fa..59940f1744 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -165,21 +165,12 @@ void device_remove_bin_file(struct device *dev, typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); -#ifdef CONFIG_DEBUG_DEVRES void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) -#else -void *devres_alloc_node(dr_release_t release, size_t size, - gfp_t gfp, int nid) __malloc; -static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) -{ - return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); -} -#endif void devres_for_each_res(struct device *dev, dr_release_t release, dr_match_t match, void *match_data, @@ -350,6 +341,22 @@ enum dl_dev_state { DL_DEV_UNBINDING, }; +/** + * enum device_removable - Whether the device is removable. The criteria for a + * device to be classified as removable is determined by its subsystem or bus. + * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this + * device (default). + * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown. + * @DEVICE_FIXED: Device is not removable by the user. + * @DEVICE_REMOVABLE: Device is removable by the user. + */ +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */ + DEVICE_REMOVABLE_UNKNOWN, + DEVICE_FIXED, + DEVICE_REMOVABLE, +}; +
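enum device_removable is paired with small accessors (dev_set_removable(), dev_is_removable(), dev_removable_is_valid()) that appear further down in this device.h hunk. A minimal sketch of the intended flow, with a made-up bus setup function; only the dev_* helpers come from the patch:

/*
 * Hypothetical enumeration path: the bus driver knows whether the slot
 * is hot-pluggable, records that on the struct device, and higher layers
 * (e.g. a "removable" sysfs attribute) can query it later.
 */
static void mybus_setup_device(struct device *dev, bool hotplug_slot)
{
	dev_set_removable(dev, hotplug_slot ? DEVICE_REMOVABLE
					    : DEVICE_FIXED);

	if (dev_is_removable(dev))
		dev_info(dev, "device may be removed by the user\n");
}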
/** * struct dev_links_info - Device data related to device links. * @suppliers: List of links to supplier devices. @@ -399,7 +406,7 @@ struct dev_links_info { * along with subsystem-level and driver-level callbacks. * @em_pd: device's energy model performance domain * @pins: For device pin management. - * See Documentation/driver-api/pinctl.rst for details. + * See Documentation/driver-api/pin-control.rst for details. * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. @@ -431,6 +438,9 @@ struct dev_links_info { * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu: Per device generic IOMMU runtime data + * @removable: Whether the device can be removed from the system. This + * should be set by the subsystem / bus driver that discovered + * the device. * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). @@ -544,6 +554,8 @@ struct device { struct iommu_group *iommu_group; struct dev_iommu *iommu; + enum device_removable removable; + bool offline_disabled:1; bool offline:1; bool of_node_reused:1; @@ -780,6 +792,22 @@ static inline bool dev_has_sync_state(struct device *dev) return false; } +static inline void dev_set_removable(struct device *dev, + enum device_removable removable) +{ + dev->removable = removable; +} + +static inline bool dev_is_removable(struct device *dev) +{ + return dev->removable == DEVICE_REMOVABLE; +} + +static inline bool dev_removable_is_valid(struct device *dev) +{ + return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED; +} + /* * High level routines for use by the bus drivers */ @@ -817,6 +845,7 @@ int device_online(struct device *dev); void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); +void device_set_node(struct device *dev, struct fwnode_handle *fwnode); static inline int dev_num_vf(struct device *dev) { @@ -845,6 +874,8 @@ static inline void *dev_get_platdata(const struct device *dev) * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ +int __must_check device_driver_attach(struct device_driver *drv, + struct device *dev); int __must_check device_bind_driver(struct device *dev); void device_release_driver(struct device *dev); int __must_check device_attach(struct device *dev); diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h index f40f77717a..7489180220 100644 --- a/include/linux/devm-helpers.h +++ b/include/linux/devm-helpers.h @@ -51,4 +51,29 @@ static inline int devm_delayed_work_autocancel(struct device *dev, return devm_add_action(dev, devm_delayed_work_drop, w); } +static inline void devm_work_drop(void *res) +{ + cancel_work_sync(res); +} + +/** + * devm_work_autocancel - Resource-managed work initialization + * @dev: Device whose lifetime the work is bound to + * @w: Work to be added (and automatically cancelled) + * @worker: Worker function + * + * Initialize work which is automatically cancelled when the driver is + * detached. A few drivers need to queue work which must be cancelled before + * the driver is detached to avoid accessing removed resources. + * devm_work_autocancel() can be used to omit the explicit + * cancellation when the driver is detached. + */ +static inline int devm_work_autocancel(struct device *dev, + struct work_struct *w, + work_func_t worker) +{ + INIT_WORK(w, worker); + return devm_add_action(dev, devm_work_drop, w); +} + #endif
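devm_work_autocancel() mirrors the devm_delayed_work_autocancel() helper directly above it. A minimal usage sketch, assuming a hypothetical driver whose private data (struct my_priv, my_irq_work()) embeds the work item:

struct my_priv {
	struct work_struct irq_work;
};

static void my_irq_work(struct work_struct *w)
{
	/* deferred handling; must never run after the driver is unbound */
}

static int my_probe(struct device *dev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* cancel_work_sync() is queued as a devres action for unbind */
	return devm_work_autocancel(dev, &priv->irq_work, my_irq_work);
}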
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h index e42de7750c..c1707ee5b5 100644 --- a/include/linux/dm-kcopyd.h +++ b/include/linux/dm-kcopyd.h @@ -51,6 +51,7 @@ MODULE_PARM_DESC(name, description) struct dm_kcopyd_client; struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle); void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc); +void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc); /* * Submit a copy job to kcopyd. This is built on top of the diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 6e75a2d689..758ca46942 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -19,7 +19,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size); +void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); /* The DMA API isn't _quite_ the whole story, though... */ /* @@ -50,7 +50,7 @@ struct msi_msg; struct device; static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size) + u64 dma_limit) { } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index d44a77e8a7..e1ca2080a1 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -78,19 +78,11 @@ struct dma_resv { #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) -/** - * dma_resv_get_list - get the reservation object's - * shared fence list, with update-side lock held - * @obj: the reservation object - * - * Returns the shared fence list. Does NOT take references to - * the fence. The obj->lock must be held. - */ -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) -{ - return rcu_dereference_protected(obj->fence, - dma_resv_held(obj)); -} +#ifdef CONFIG_DEBUG_MUTEXES +void dma_resv_reset_shared_max(struct dma_resv *obj); +#else +static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} +#endif /** * dma_resv_lock - lock the reservation object @@ -215,38 +207,29 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) */ static inline void dma_resv_unlock(struct dma_resv *obj) { -#ifdef CONFIG_DEBUG_MUTEXES - /* Test shared fence slot reservation */ - if (rcu_access_pointer(obj->fence)) { - struct dma_resv_list *fence = dma_resv_get_list(obj); - - fence->shared_max = fence->shared_count; - } -#endif + dma_resv_reset_shared_max(obj); ww_mutex_unlock(&obj->lock); } /** - * dma_resv_get_excl - get the reservation object's - * exclusive fence, with update-side lock held + * dma_resv_excl_fence - return the object's exclusive fence * @obj: the reservation object * - * Returns the exclusive fence (if any). Does NOT take a - * reference. Writers must hold obj->lock, readers may only - * hold a RCU read side lock. + * Returns the exclusive fence (if any). The caller must either hold the object + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each. * * RETURNS * The exclusive fence or NULL */ static inline struct dma_fence * -dma_resv_get_excl(struct dma_resv *obj) +dma_resv_excl_fence(struct dma_resv *obj) { - return rcu_dereference_protected(obj->fence_excl, - dma_resv_held(obj)); + return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); }
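The switch from rcu_dereference_protected() to rcu_dereference_check() is what lets dma_resv_excl_fence() serve both locked and RCU readers. A minimal sketch of the lock-free side, with a hypothetical busy-check helper:

/*
 * Read the exclusive fence without taking the reservation lock. Fences
 * are freed via RCU, so inspecting the fence under rcu_read_lock() is
 * safe here; no reference is taken.
 */
static bool my_obj_is_busy(struct dma_resv *resv)
{
	struct dma_fence *fence;
	bool busy;

	rcu_read_lock();
	fence = dma_resv_excl_fence(resv);
	busy = fence && !dma_fence_is_signaled(fence);
	rcu_read_unlock();

	return busy;
}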
/** - * dma_resv_get_excl_rcu - get the reservation object's + * dma_resv_get_excl_unlocked - get the reservation object's * exclusive fence, without lock held. * @obj: the reservation object * @@ -257,7 +240,7 @@ dma_resv_get_excl(struct dma_resv *obj) * The exclusive fence or NULL if none */ static inline struct dma_fence * -dma_resv_get_excl_rcu(struct dma_resv *obj) +dma_resv_get_excl_unlocked(struct dma_resv *obj) { struct dma_fence *fence; @@ -271,23 +254,29 @@ dma_resv_get_excl_rcu(struct dma_resv *obj) return fence; } +/** + * dma_resv_shared_list - get the reservation object's shared fence list + * @obj: the reservation object + * + * Returns the shared fence list. The caller must either hold the object + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each. + */ +static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) +{ + return rcu_dereference_check(obj->fence, dma_resv_held(obj)); +} + void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); - void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); - -int dma_resv_get_fences_rcu(struct dma_resv *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared); - +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, + unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); - -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout); - -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout); +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 004736b6a9..93c3ca5fda 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -229,12 +229,6 @@ enum sum_check_flags { */ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; -/** - * struct dma_chan_percpu - the per-CPU part of struct dma_chan - * @memcpy_count: transaction counter - * @bytes_transferred: byte counter - */ - /** * enum dma_desc_metadata_mode - per descriptor metadata mode types supported * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the @@ -291,6 +285,11 @@ enum dma_desc_metadata_mode { DESC_METADATA_ENGINE = BIT(1), }; +/** + * struct dma_chan_percpu - the per-CPU part of struct dma_chan + * @memcpy_count: transaction counter + * @bytes_transferred: byte counter + */ struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index b12b05f1c8..1587961f1a 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -37,8 +37,6 @@ struct dsa_8021q_context { #define DSA_8021Q_N_SUBVLAN 8 -#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) - int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled); int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port, @@ -52,6 +50,9 @@ int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port, struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, + int *subvlan); + u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int
port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); @@ -70,78 +71,4 @@ bool vid_is_dsa_8021q_txvlan(u16 vid); bool vid_is_dsa_8021q(u16 vid); -#else - -int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled) -{ - return 0; -} - -int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port) -{ - return 0; -} - -int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port) -{ - return 0; -} - -struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, - u16 tpid, u16 tci) -{ - return NULL; -} - -u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port) -{ - return 0; -} - -u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port) -{ - return 0; -} - -u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan) -{ - return 0; -} - -int dsa_8021q_rx_switch_id(u16 vid) -{ - return 0; -} - -int dsa_8021q_rx_source_port(u16 vid) -{ - return 0; -} - -u16 dsa_8021q_rx_subvlan(u16 vid) -{ - return 0; -} - -bool vid_is_dsa_8021q_rxvlan(u16 vid) -{ - return false; -} - -bool vid_is_dsa_8021q_txvlan(u16 vid) -{ - return false; -} - -bool vid_is_dsa_8021q(u16 vid) -{ - return false; -} - -#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ - #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index 1eb84562b3..b6089b8831 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -14,6 +14,7 @@ #define ETH_P_SJA1105 ETH_P_DSA_8021Q #define ETH_P_SJA1105_META 0x0008 +#define ETH_P_SJA1110 0xdadc /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */ #define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull @@ -44,11 +45,14 @@ struct sja1105_tagger_data { */ spinlock_t meta_lock; unsigned long state; + u8 ts_id; }; struct sja1105_skb_cb { struct sk_buff *clone; - u32 meta_tstamp; + u64 tstamp; + /* Only valid for packets cloned for 2-step TX timestamping */ + u8 ts_id; }; #define SJA1105_SKB_CB(skb) \ @@ -65,4 +69,24 @@ struct sja1105_port { u16 xmit_tpid; }; +enum sja1110_meta_tstamp { + SJA1110_META_TSTAMP_TX = 0, + SJA1110_META_TSTAMP_RX = 1, +}; + +#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) + +void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, + enum sja1110_meta_tstamp dir, u64 tstamp); + +#else + +static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, + u8 ts_id, enum sja1110_meta_tstamp dir, + u64 tstamp) +{ +} + +#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ + #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index 99580c22f9..34c2175e6a 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h @@ -10,6 +10,9 @@ struct eeprom_93xx46_platform_data { #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ #define EE_READONLY 0x08 /* forbid writing */ +#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */ +#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */ +#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */ unsigned int quirks; /* Single word read transfers only; no sequential read. 
*/ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index dcb2f9022c..ef9ceead3d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -117,9 +117,11 @@ extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); extern void elv_merged_request(struct request_queue *, struct request *, enum elv_merge); -extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); +extern bool elv_attempt_insert_merge(struct request_queue *, struct request *, + struct list_head *); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); +void elevator_init_mq(struct request_queue *q); /* * io scheduler registration diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 757fc60658..3f221dbf5f 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -91,6 +91,8 @@ void em_dev_unregister_perf_domain(struct device *dev); * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain + * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which + * might reflect a reduced frequency (due to thermal) * * This function must be used only for CPU devices. There is no validation, * i.e. if the EM is a CPU type and has cpumask allocated. It is called from @@ -100,7 +102,8 @@ void em_dev_unregister_perf_domain(struct device *dev); * a capacity state satisfying the max utilization of the domain. */ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, - unsigned long max_util, unsigned long sum_util) + unsigned long max_util, unsigned long sum_util, + unsigned long allowed_cpu_cap) { unsigned long freq, scale_cpu; struct em_perf_state *ps; @@ -112,11 +115,17 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, /* * In order to predict the performance state, map the utilization of * the most utilized CPU of the performance domain to a requested - * frequency, like schedutil. + * frequency, like schedutil. Also take into account that the real + * frequency might be set lower (due to thermal capping). Thus, clamp + * max utilization to the allowed CPU capacity before calculating + * effective frequency. */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); ps = &pd->table[pd->nr_perf_states - 1]; + + max_util = map_util_perf(max_util); + max_util = min(max_util, allowed_cpu_cap); freq = map_util_freq(max_util, ps->frequency, scale_cpu); /* @@ -209,7 +218,8 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev) return NULL; } static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, - unsigned long max_util, unsigned long sum_util) + unsigned long max_util, unsigned long sum_util, + unsigned long allowed_cpu_cap) { return 0; }
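em_cpu_energy() now boosts the incoming utilization and clamps it to the thermally allowed capacity before mapping it to a frequency. A worked example with illustrative numbers (none of them from the patch):

/*
 * Assume scale_cpu = 1024 and a highest perf state of 2000 MHz.
 *
 *   max_util            = 900
 *   map_util_perf(900)  = 900 + 900/4 = 1125   (schedutil-style headroom)
 *   allowed_cpu_cap     = 768                  (thermal capping active)
 *   max_util            = min(1125, 768) = 768
 *   freq                = 768 * 2000 MHz / 1024 = 1500 MHz
 *
 * Without the clamp, the energy estimate would be computed for a
 * frequency the thermal framework has already ruled out; with it, the
 * model evaluates a performance state the CPU can actually run at.
 */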
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e030f7510c..232daaec56 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -401,12 +401,12 @@ struct ethtool_rmon_stats { * required information to the driver. */ struct ethtool_module_eeprom { - __u32 offset; - __u32 length; - __u8 page; - __u8 bank; - __u8 i2c_address; - __u8 *data; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; }; /** @@ -757,6 +757,16 @@ void ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode); +/** + * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller + * is responsible for freeing the memory of vclock_index + * @dev: pointer to net_device structure + * @vclock_index: pointer to pointer of vclock index + * + * Return: number of phc vclocks + */ +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); + /** * ethtool_sprintf - Write formatted string to ethtool string data * @data: Pointer to start of string to update diff --git a/include/linux/evm.h b/include/linux/evm.h index 8302bc29bb..4c374be702 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -23,18 +23,25 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry, struct integrity_iint_cache *iint); extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); -extern int evm_inode_setxattr(struct dentry *dentry, const char *name, +extern int evm_inode_setxattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, size_t size); extern void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); -extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); +extern int evm_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *xattr_name); extern void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name); extern int evm_inode_init_security(struct inode *inode, const struct xattr *xattr_array, struct xattr *evm); +extern bool evm_revalidate_status(const char *xattr_name); +extern int evm_protected_xattr_if_enabled(const char *req_xattr_name); +extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, + int buffer_size, char type, + bool canonical_fmt); #ifdef CONFIG_FS_POSIX_ACL extern int posix_xattr_acl(const char *xattrname); #else @@ -71,7 +78,8 @@ static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) return; } -static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, +static inline int evm_inode_setxattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, size_t size) { return 0; @@ -85,7 +93,8 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry, return; } -static inline int evm_inode_removexattr(struct dentry *dentry, +static inline int evm_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *xattr_name) { return 0; @@ -104,5 +113,22 @@ static inline int evm_inode_init_security(struct inode *inode, return 0; } +static inline bool evm_revalidate_status(const char *xattr_name) +{ + return false; +} + +static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name) +{ + return false; +} + +static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, + int buffer_size, char type, + bool canonical_fmt) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_EVM */ #endif /* LINUX_EVM_H */ diff --git a/include/linux/export.h b/include/linux/export.h index 6271a5d9c9..27d848712b
100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -140,7 +140,12 @@ struct kernel_symbol { #define ___cond_export_sym(sym, sec, ns, enabled) \ __cond_export_sym_##enabled(sym, sec, ns) #define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns) + +#ifdef __GENKSYMS__ +#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym) +#else #define __cond_export_sym_0(sym, sec, ns) /* nothing */ +#endif #else diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 5487a80617..d445150c53 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -34,6 +34,7 @@ #define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num) #define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num) #define F2FS_META_INO(sbi) ((sbi)->meta_ino_num) +#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid) #define F2FS_MAX_QUOTAS 3 @@ -229,6 +230,7 @@ struct f2fs_extent { #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ #define F2FS_PIN_FILE 0x40 /* file should not be gced */ +#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */ struct f2fs_inode { __le16 i_mode; /* file mode */ diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 766fcd973b..a332e79b32 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -12,10 +12,6 @@ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) -/* List of all valid flags for the how->upgrade_mask argument: */ -#define VALID_UPGRADE_FLAGS \ - (UPGRADE_NOWRITE | UPGRADE_NOREAD) - /* List of all valid flags for the how->resolve argument: */ #define VALID_RESOLVE_FLAGS \ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \ diff --git a/include/linux/filter.h b/include/linux/filter.h index 9a09547bc7..472f97074d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -646,6 +646,7 @@ struct bpf_redirect_info { u32 flags; u32 tgt_index; void *tgt_value; + struct bpf_map *map; u32 map_id; enum bpf_map_type map_type; u32 kern_flags; @@ -762,11 +763,9 @@ DECLARE_BPF_DISPATCHER(xdp) static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { - /* Caller needs to hold rcu_read_lock() (!), otherwise program - * can be released while still running, or map elements could be - * freed early while still having concurrent users. XDP fastpath - * already takes rcu_read_lock() when fetching the program, so - * it's not necessary here anymore. + /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus + * under local_bh_disable(), which provides the needed RCU protection + * for accessing map entries. 
*/ return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); } @@ -995,11 +994,13 @@ void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_INET struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, + struct sock *migrating_sk, u32 hash); #else static inline struct sock * bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, + struct sock *migrating_sk, u32 hash) { return NULL; @@ -1464,17 +1465,19 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, } #endif /* IS_ENABLED(CONFIG_IPV6) */ -static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags, +static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, + u64 flags, const u64 flag_mask, void *lookup_elem(struct bpf_map *map, u32 key)) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX; /* Lower bits of the flags are used as return code on lookup failure */ - if (unlikely(flags > XDP_TX)) + if (unlikely(flags & ~(action_mask | flag_mask))) return XDP_ABORTED; ri->tgt_value = lookup_elem(map, ifindex); - if (unlikely(!ri->tgt_value)) { + if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) { /* If the lookup fails we want to clear out the state in the * redirect_info struct completely, so that if an eBPF program * performs multiple lookups, the last one always takes @@ -1482,13 +1485,21 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifind */ ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */ ri->map_type = BPF_MAP_TYPE_UNSPEC; - return flags; + return flags & action_mask; } ri->tgt_index = ifindex; ri->map_id = map->id; ri->map_type = map->map_type; + if (flags & BPF_F_BROADCAST) { + WRITE_ONCE(ri->map, map); + ri->flags = flags; + } else { + WRITE_ONCE(ri->map, NULL); + ri->flags = 0; + } + return XDP_REDIRECT; } diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 84e346ae76..25109192ce 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -6,8 +6,8 @@ #include #include -#define FW_ACTION_NOHOTPLUG 0 -#define FW_ACTION_HOTPLUG 1 +#define FW_ACTION_NOUEVENT 0 +#define FW_ACTION_UEVENT 1 struct firmware { size_t size; diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h index 0b08ac20ab..a6b4c07858 100644 --- a/include/linux/fpga/altera-pr-ip-core.h +++ b/include/linux/fpga/altera-pr-ip-core.h @@ -13,6 +13,5 @@ #include int alt_pr_register(struct device *dev, void __iomem *reg_base); -void alt_pr_unregister(struct device *dev); #endif /* _ALT_PR_IP_CORE_H */ diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index 817600a32c..6c3c28806f 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -11,7 +11,7 @@ struct fpga_bridge; /** * struct fpga_bridge_ops - ops for low level FPGA bridge drivers * @enable_show: returns the FPGA bridge's status - * @enable_set: set a FPGA bridge as enabled or disabled + * @enable_set: set an FPGA bridge as enabled or disabled * @fpga_bridge_remove: set FPGA into a specific state during driver remove * @groups: optional attribute groups. 
*/ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 2bc3030a69..ec2cd8bfce 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -75,7 +75,7 @@ enum fpga_mgr_states { #define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4) /** - * struct fpga_image_info - information specific to a FPGA image + * struct fpga_image_info - information specific to an FPGA image * @flags: boolean flags as defined above * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) diff --git a/include/linux/fs.h b/include/linux/fs.h index c3c88fdb9b..6405742942 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2171,7 +2171,6 @@ struct super_operations { ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); struct dquot **(*get_dquots)(struct inode *); #endif - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long (*nr_cached_objects)(struct super_block *, struct shrink_control *); long (*free_cached_objects)(struct super_block *, @@ -2769,8 +2768,14 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode); extern struct file *file_open_name(struct filename *, int, umode_t); extern struct file *filp_open(const char *, int, umode_t); -extern struct file *file_open_root(struct dentry *, struct vfsmount *, +extern struct file *file_open_root(const struct path *, const char *, int, umode_t); +static inline struct file *file_open_root_mnt(struct vfsmount *mnt, + const char *name, int flags, umode_t mode) +{ + return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root}, + name, flags, mode); +} extern struct file * dentry_open(const struct path *, int, const struct cred *); extern struct file * open_with_fake_path(const struct path *, int, struct inode*, const struct cred *); @@ -3242,11 +3247,8 @@ ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, struct iov_iter *iter); /* fs/block_dev.c */ -extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); -extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync); -extern void block_sync_page(struct page *page); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, @@ -3417,18 +3419,14 @@ extern int simple_rename(struct user_namespace *, struct inode *, extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); -extern int noop_set_page_dirty(struct page *page); extern void noop_invalidatepage(struct page *page, unsigned int offset, unsigned int length); extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); -extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); -extern int simple_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); +extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); extern int simple_nosetlease(struct file *, long, struct file_lock 
**, void **); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 37e1e8f7f0..e2bc16300c 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -139,6 +139,8 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); +extern int vfs_parse_fs_param_source(struct fs_context *fc, + struct fs_parameter *param); /* * sget() wrappers to be called from the ->get_tree() op. diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 0abd9a1d28..f6faa31289 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -7,12 +7,21 @@ extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); #endif +#ifdef CONFIG_OSNOISE_TRACER +extern bool trace_osnoise_callback_enabled; +extern void trace_osnoise_callback(bool enter); +#endif + static inline void ftrace_nmi_enter(void) { #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(true); #endif +#ifdef CONFIG_OSNOISE_TRACER + if (trace_osnoise_callback_enabled) + trace_osnoise_callback(true); +#endif } static inline void ftrace_nmi_exit(void) @@ -21,6 +30,10 @@ static inline void ftrace_nmi_exit(void) if (trace_hwlat_callback_enabled) trace_hwlat_callback(false); #endif +#ifdef CONFIG_OSNOISE_TRACER + if (trace_osnoise_callback_enabled) + trace_osnoise_callback(false); +#endif } #endif /* _LINUX_FTRACE_IRQ_H */ diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h new file mode 100644 index 0000000000..faf603c48c --- /dev/null +++ b/include/linux/fwnode_mdio.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * FWNODE helper for the MDIO (Ethernet PHY) API + */ + +#ifndef __LINUX_FWNODE_MDIO_H +#define __LINUX_FWNODE_MDIO_H + +#include + +#if IS_ENABLED(CONFIG_FWNODE_MDIO) +int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct fwnode_handle *child, u32 addr); + +int fwnode_mdiobus_register_phy(struct mii_bus *bus, + struct fwnode_handle *child, u32 addr); + +#else /* CONFIG_FWNODE_MDIO */ +static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct fwnode_handle *child, u32 addr) +{ + return -EINVAL; +} + +static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus, + struct fwnode_handle *child, + u32 addr) +{ + return -EINVAL; +} +#endif + +#endif /* __LINUX_FWNODE_MDIO_H */
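Note that the !CONFIG_FWNODE_MDIO stub for fwnode_mdiobus_phy_device_register() is made static inline above; as posted it was a plain function definition in a header, which produces multiple-definition link errors as soon as two translation units include the file. For the enabled case, a minimal usage sketch with a hypothetical MDIO controller driver (my_mdio_register_phys() is illustrative):

/*
 * Walk the child fwnodes of the bus node and register one PHY per
 * child; reading the "reg" property for the MDIO address is the usual
 * binding convention. Error handling is trimmed for brevity.
 */
static int my_mdio_register_phys(struct mii_bus *bus,
				 struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;
	u32 addr;
	int ret;

	fwnode_for_each_child_node(fwnode, child) {
		ret = fwnode_property_read_u32(child, "reg", &addr);
		if (ret)
			continue;

		ret = fwnode_mdiobus_register_phy(bus, child, addr);
		if (ret) {
			fwnode_handle_put(child);
			return ret;
		}
	}

	return 0;
}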
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 6fc26f7bdf..13b34177cc 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -153,6 +153,11 @@ struct gendisk { unsigned long state; #define GD_NEED_PART_SCAN 0 #define GD_READ_ONLY 1 +#define GD_QUEUE_REF 2 + + struct mutex open_mutex; /* open/close mutex */ + unsigned open_partitions; /* number of open partitions */ + struct kobject *slave_dir; struct timer_rand_state *random; @@ -218,7 +223,6 @@ static inline void add_disk_no_queue_reg(struct gendisk *disk) } extern void del_gendisk(struct gendisk *gp); -extern struct block_device *bdget_disk(struct gendisk *disk, int partno); void set_disk_ro(struct gendisk *disk, bool read_only); @@ -252,8 +256,7 @@ static inline sector_t get_capacity(struct gendisk *disk) return bdev_nr_sectors(disk->part0); } -int bdev_disk_changed(struct block_device *bdev, bool invalidate); -int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); +int bdev_disk_changed(struct gendisk *disk, bool invalidate); void blk_drop_partitions(struct gendisk *disk); extern struct gendisk *__alloc_disk_node(int minors, int node_id); @@ -277,6 +280,28 @@ extern void put_disk(struct gendisk *disk); #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) +/** + * blk_alloc_disk - allocate a gendisk structure + * @node_id: numa node to allocate on + * + * Allocate and pre-initialize a gendisk structure for use with BIO based + * drivers. + * + * Context: can sleep + */ +#define blk_alloc_disk(node_id) \ +({ \ + struct gendisk *__disk = __blk_alloc_disk(node_id); \ + static struct lock_class_key __key; \ + \ + if (__disk) \ + lockdep_init_map(&__disk->lockdep_map, \ + "(bio completion)", &__key, 0); \ + __disk; \ +}) +struct gendisk *__blk_alloc_disk(int node); +void blk_cleanup_disk(struct gendisk *disk); + int __register_blkdev(unsigned int major, const char *name, void (*probe)(dev_t devt)); #define register_blkdev(major, name) \ @@ -306,6 +331,7 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, } #endif /* CONFIG_SYSFS */ +dev_t part_devt(struct gendisk *disk, u8 partno); dev_t blk_lookup_devt(const char *name, int partno); void blk_request_module(dev_t devt); #ifdef CONFIG_BLOCK
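blk_alloc_disk() bundles what used to be separate gendisk and queue setup for BIO-based drivers, and blk_cleanup_disk() is its teardown counterpart. A minimal sketch of a hypothetical BIO-based driver using the pair (MY_MAJOR and my_fops are stand-ins, not from the patch):

/* hypothetical ops table; a real driver would at least set .submit_bio */
static const struct block_device_operations my_fops = {
	.owner = THIS_MODULE,
};

static struct gendisk *my_disk;

static int my_create_disk(void)
{
	my_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!my_disk)
		return -ENOMEM;

	my_disk->major = MY_MAJOR;
	my_disk->first_minor = 0;
	my_disk->minors = 1;
	my_disk->fops = &my_fops;
	snprintf(my_disk->disk_name, sizeof(my_disk->disk_name), "mydisk0");
	set_capacity(my_disk, 2048);	/* 1 MiB in 512-byte sectors */

	add_disk(my_disk);
	return 0;
}

static void my_destroy_disk(void)
{
	del_gendisk(my_disk);
	blk_cleanup_disk(my_disk);	/* frees both disk and queue */
}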
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 11da8af067..55b2ec1f96 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -53,8 +53,10 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x100000u #define ___GFP_THISNODE 0x200000u #define ___GFP_ACCOUNT 0x400000u +#define ___GFP_ZEROTAGS 0x800000u +#define ___GFP_SKIP_KASAN_POISON 0x1000000u #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x800000u +#define ___GFP_NOLOCKDEP 0x2000000u #else #define ___GFP_NOLOCKDEP 0 #endif @@ -229,16 +231,25 @@ struct vm_area_struct; * %__GFP_COMP address compound page metadata. * * %__GFP_ZERO returns a zeroed page on success. + * + * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if + * __GFP_ZERO is set. + * + * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned + * on deallocation. Typically used for userspace pages. Currently only has an + * effect in HW tags mode. */ #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) +#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) /* Disable lockdep for GFP context tracking */ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** @@ -319,7 +330,8 @@ struct vm_area_struct; #define GFP_DMA __GFP_DMA #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) -#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ + __GFP_SKIP_KASAN_POISON) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) @@ -494,8 +506,8 @@ static inline int gfp_zonelist(gfp_t flags) * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * - * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets - * optimized to &contig_page_data at compile-time. + * For the case of non-NUMA systems the NODE_DATA() gets optimized to + * &contig_page_data at compile-time. */ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { @@ -536,6 +548,15 @@ alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_arr return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); } +static inline unsigned long +alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) +{ + if (nid == NUMA_NO_NODE) + nid = numa_mem_id(); + + return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array); +} + /* * Allocate pages, preferring the node given as nid. The node must be valid and * online. For more general interface, see alloc_pages_node(). diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index c73b25bc92..566feb5660 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -692,6 +692,8 @@ int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios); void devm_acpi_dev_remove_driver_gpios(struct device *dev); +struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label); + #else /* CONFIG_GPIOLIB && CONFIG_ACPI */ struct acpi_device; diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index 334dd92804..a9f7b7faf5 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -37,6 +37,9 @@ struct regmap; * offset to a register/bitmask pair. If not * given the default gpio_regmap_simple_xlate() * is used. + * @drvdata: (Optional) Pointer to driver specific data which is + * not used by gpio-regmap but is provided "as is" to the + * driver callback(s). * * The ->reg_mask_xlate translates a given base address and GPIO offset to * register and mask pair. The base address is one of the given register @@ -78,13 +81,14 @@ struct gpio_regmap_config { int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, unsigned int *mask); + + void *drvdata; }; struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config); void gpio_regmap_unregister(struct gpio_regmap *gpio); struct gpio_regmap *devm_gpio_regmap_register(struct device *dev, const struct gpio_regmap_config *config); -void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data); void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio); #endif /* _LINUX_GPIO_REGMAP_H */
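With gpio_regmap_set_drvdata() gone, driver data now travels in struct gpio_regmap_config itself, so it is already visible when the first callback runs. A minimal sketch with a hypothetical chip (struct my_chip and the register layout are made up):

struct my_chip {
	unsigned int stride;	/* bytes between 8-bit GPIO banks */
};

static int my_xlate(struct gpio_regmap *gpio, unsigned int base,
		    unsigned int offset, unsigned int *reg,
		    unsigned int *mask)
{
	struct my_chip *chip = gpio_regmap_get_drvdata(gpio);

	*reg = base + chip->stride * (offset / 8);
	*mask = BIT(offset % 8);
	return 0;
}

static int my_gpio_register(struct device *dev, struct regmap *regmap,
			    struct my_chip *chip)
{
	struct gpio_regmap_config config = {
		.parent		= dev,
		.regmap		= regmap,
		.ngpio		= 32,
		.reg_mask_xlate	= my_xlate,
		.drvdata	= chip,	/* handed over at registration time */
	};

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
}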
diff --git a/include/linux/hid.h b/include/linux/hid.h index 10e922cee4..9e067f937d 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -102,6 +102,7 @@ struct hid_item { #define HID_COLLECTION_PHYSICAL 0 #define HID_COLLECTION_APPLICATION 1 #define HID_COLLECTION_LOGICAL 2 +#define HID_COLLECTION_NAMED_ARRAY 4 /* * HID report descriptor global item tags @@ -800,6 +801,7 @@ struct hid_driver { * @raw_request: send raw report request to device (e.g. feature report) * @output_report: send output report to device * @idle: send idle request to device + * @may_wakeup: return if device may act as a wakeup source during system-suspend */ struct hid_ll_driver { int (*start)(struct hid_device *hdev); @@ -824,6 +826,7 @@ struct hid_ll_driver { int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); + bool (*may_wakeup)(struct hid_device *hdev); }; extern struct hid_ll_driver i2c_hid_ll_driver; @@ -1149,6 +1152,22 @@ static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle, return 0; } +/** + * hid_hw_may_wakeup - return if the hid device may act as a wakeup source during system-suspend + * + * @hdev: hid device + */ +static inline bool hid_hw_may_wakeup(struct hid_device *hdev) +{ + if (hdev->ll_driver->may_wakeup) + return hdev->ll_driver->may_wakeup(hdev); + + if (hdev->dev.parent) + return device_may_wakeup(hdev->dev.parent); + + return false; +} + /** * hid_hw_wait - wait for buffered io to complete * diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 832b49b50c..8c6e8e996c 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -152,28 +152,24 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr) } #endif -#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE /** - * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags - * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE + * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into * - * This function will allocate a page for a VMA but the caller is expected - * to specify via movableflags whether the page will be movable in the - * future or not + * This function will allocate a page for a VMA that the caller knows will + * be able to migrate in the future using move_pages() or be reclaimed * * An architecture may override this function by defining - * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own + * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own * implementation.
*/ static inline struct page * -__alloc_zeroed_user_highpage(gfp_t movableflags, - struct vm_area_struct *vma, - unsigned long vaddr) +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) { - struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, - vma, vaddr); + struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (page) clear_user_highpage(page, vaddr); @@ -182,21 +178,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags, } #endif -/** - * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move - * @vma: The VMA the page is to be allocated for - * @vaddr: The virtual address the page will be inserted into - * - * This function will allocate a page for a VMA that the caller knows will - * be able to migrate in the future using move_pages() or reclaimed - */ -static inline struct page * -alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, - unsigned long vaddr) -{ - return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); -} - static inline void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page); @@ -204,6 +185,14 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } +#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE + +static inline void tag_clear_highpage(struct page *page) +{ +} + +#endif + /* * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 866a0fa104..2fd2e91d51 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -113,7 +113,7 @@ int hmm_range_fault(struct hmm_range *range); * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range * * When waiting for mmu notifiers we need some kind of time out otherwise we - * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to + * could potentially wait forever, 1000ms i.e. 1s sounds like a long time to * wait already.
*/ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 2a8ebe6c22..f123e15d96 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -10,8 +10,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, - struct vm_area_struct *vma); -void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); + struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); +void huge_pmd_set_accessed(struct vm_fault *vmf); int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma); @@ -24,7 +24,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) } #endif -vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); +vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); @@ -115,9 +115,34 @@ extern struct kobj_attribute shmem_enabled_attr; extern unsigned long transparent_hugepage_flags; +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + /* Don't have to check pgoff for anonymous vma */ + if (!vma_is_anonymous(vma)) { + if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, + HPAGE_PMD_NR)) + return false; + } + + if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + return false; + return true; +} + +static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + /* Explicitly disabled through madvise. */ + if ((vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + return false; + return true; +} + /* * to be used on vmas which are known to support THP. 
- * Use transparent_hugepage_enabled otherwise + * Use transparent_hugepage_active otherwise */ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { @@ -128,15 +153,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) return false; - if (vma->vm_flags & VM_NOHUGEPAGE) + if (!transhuge_vma_enabled(vma, vma->vm_flags)) return false; if (vma_is_temporary_stack(vma)) return false; - if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) - return false; - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) return true; @@ -150,24 +172,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } -bool transparent_hugepage_enabled(struct vm_area_struct *vma); - -#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) - -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long haddr) -{ - /* Don't have to check pgoff for anonymous vma */ - if (!vma_is_anonymous(vma)) { - if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != - (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) - return false; - } - - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) - return false; - return true; -} +bool transparent_hugepage_active(struct vm_area_struct *vma); #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -283,7 +288,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap); -vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); +vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); extern struct page *huge_zero_page; extern unsigned long huge_zero_pfn; @@ -354,7 +359,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } -static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) +static inline bool transparent_hugepage_active(struct vm_area_struct *vma) { return false; } @@ -365,6 +370,12 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, return false; } +static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + return false; +} + static inline void prep_transhuge_page(struct page *page) {} static inline bool is_transparent_hugepage(struct page *page) @@ -430,8 +441,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, return NULL; } -static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, - pmd_t orig_pmd) +static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) { return 0; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3c01176567..f7ca1a3870 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -29,12 +29,29 @@ typedef struct { unsigned long pd; } hugepd_t; #include #include +/* + * A HugeTLB page carries more metadata in the struct page than the head + * struct page can hold, so we have to abuse other tail struct pages to + * store it. In order to avoid conflicts caused by subsequent use of more + * tail struct pages, we gather these discrete indexes of tail struct page + * here.
+ */ +enum { + SUBPAGE_INDEX_SUBPOOL = 1, /* reuse page->private */ +#ifdef CONFIG_CGROUP_HUGETLB + SUBPAGE_INDEX_CGROUP, /* reuse page->private */ + SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */ + __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD, +#endif + __NR_USED_SUBPAGE, +}; + struct hugepage_subpool { spinlock_t lock; long count; long max_hpages; /* Maximum huge pages or -1 if no maximum. */ long used_hpages; /* Used count against maximum, includes */ - /* both alloced and reserved pages. */ + /* both allocated and reserved pages. */ struct hstate *hstate; long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long rsv_hpages; /* Pages reserved against global pool to */ @@ -68,7 +85,7 @@ struct resv_map { * by a resv_map's lock. The set of regions within the resv_map represent * reservations for huge pages, or huge pages that have already been * instantiated within the map. The from and to elements are huge page - * indicies into the associated mapping. from indicates the starting index + * indices into the associated mapping. from indicates the starting index * of the region. to represents the first index past the end of the region. * * For example, a file region structure with from == 0 and to == 4 represents @@ -451,7 +468,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, - struct user_struct **user, int creat_flags, + struct ucounts **ucounts, int creat_flags, int page_size_log); static inline bool is_file_hugepages(struct file *file) @@ -471,7 +488,7 @@ static inline struct hstate *hstate_inode(struct inode *i) #define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, - struct user_struct **user, int creat_flags, + struct ucounts **ucounts, int creat_flags, int page_size_log) { return ERR_PTR(-ENOSYS); @@ -515,12 +532,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * modifications require hugetlb_lock. * HPG_freed - Set when page is on the free lists. * Synchronization: hugetlb_lock held for examination and modification. + * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. 
*/ enum hugetlb_page_flags { HPG_restore_reserve = 0, HPG_migratable, HPG_temporary, HPG_freed, + HPG_vmemmap_optimized, __NR_HPAGEFLAGS, }; @@ -566,6 +585,7 @@ HPAGEFLAG(RestoreReserve, restore_reserve) HPAGEFLAG(Migratable, migratable) HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) +HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) #ifdef CONFIG_HUGETLB_PAGE @@ -588,6 +608,9 @@ struct hstate { unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP + unsigned int nr_free_vmemmap_pages; +#endif #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ struct cftype cgroup_files_dfl[7]; @@ -635,13 +658,13 @@ extern unsigned int default_hstate_idx; */ static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) { - return (struct hugepage_subpool *)(hpage+1)->private; + return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL); } static inline void hugetlb_set_page_subpool(struct page *hpage, struct hugepage_subpool *subpool) { - set_page_private(hpage+1, (unsigned long)subpool); + set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool); } static inline struct hstate *hstate_file(struct file *f) @@ -718,8 +741,8 @@ static inline void arch_clear_hugepage_flags(struct page *page) { } #endif #ifndef arch_make_huge_pte -static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, - struct page *page, int writable) +static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, + vm_flags_t flags) { return entry; } @@ -774,7 +797,7 @@ static inline bool hugepage_migration_supported(struct hstate *h) * It determines whether or not a huge page should be placed on * movable zone or not. Movability of any huge page should be * required only if huge page size is supported for migration. - * There wont be any reason for the huge page to be movable if + * There won't be any reason for the huge page to be movable if * it is not migratable to start with. Also the size of the huge * page should be large enough to be placed under a movable zone * and still feasible enough to be migratable. Just the presence @@ -875,6 +898,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) +{ + return NULL; +} + static inline int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { @@ -1028,6 +1056,12 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr } #endif /* CONFIG_HUGETLB_PAGE */ +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP +extern bool hugetlb_free_vmemmap_enabled; +#else +#define hugetlb_free_vmemmap_enabled false +#endif + static inline spinlock_t *huge_pte_lock(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0bff345c4b..0b8d1fdda3 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -21,15 +21,16 @@ struct hugetlb_cgroup; struct resv_map; struct file_region; +#ifdef CONFIG_CGROUP_HUGETLB /* * Minimum page order trackable by hugetlb cgroup. * At least 4 pages are necessary for all the tracking information. - * The second tail page (hpage[2]) is the fault usage cgroup. - * The third tail page (hpage[3]) is the reservation usage cgroup. 
+ * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault + * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD]) + * is the reservation usage cgroup. */ -#define HUGETLB_CGROUP_MIN_ORDER 2 +#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1) -#ifdef CONFIG_CGROUP_HUGETLB enum hugetlb_memory_event { HUGETLB_MAX, HUGETLB_NR_MEMORY_EVENTS, @@ -66,9 +67,9 @@ __hugetlb_cgroup_from_page(struct page *page, bool rsvd) if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; if (rsvd) - return (struct hugetlb_cgroup *)page[3].private; + return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD); else - return (struct hugetlb_cgroup *)page[2].private; + return (void *)page_private(page + SUBPAGE_INDEX_CGROUP); } static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) @@ -90,9 +91,11 @@ static inline int __set_hugetlb_cgroup(struct page *page, if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; if (rsvd) - page[3].private = (unsigned long)h_cg; + set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD, + (unsigned long)h_cg); else - page[2].private = (unsigned long)h_cg; + set_page_private(page + SUBPAGE_INDEX_CGROUP, + (unsigned long)h_cg); return 0; } diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index d1e59dbef1..2e859d2f96 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -181,6 +181,10 @@ struct hv_ring_buffer_info { * being freed while the ring buffer is being accessed. */ struct mutex ring_buffer_mutex; + + /* Buffer that holds a copy of an incoming host packet */ + void *pkt_buffer; + u32 pkt_buffer_size; }; @@ -790,7 +794,11 @@ struct vmbus_requestor { #define VMBUS_NO_RQSTOR U64_MAX #define VMBUS_RQST_ERROR (U64_MAX - 1) +/* NetVSC-specific */ #define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2) +/* StorVSC-specific */ +#define VMBUS_RQST_INIT (U64_MAX - 2) +#define VMBUS_RQST_RESET (U64_MAX - 3) struct vmbus_device { u16 dev_type; @@ -799,6 +807,8 @@ struct vmbus_device { bool allowed_in_isolated; }; +#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096 + struct vmbus_channel { struct list_head listentry; @@ -1018,13 +1028,21 @@ struct vmbus_channel { u32 fuzz_testing_interrupt_delay; u32 fuzz_testing_message_delay; + /* callback to generate a request ID from a request address */ + u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr); + /* callback to retrieve a request address from a request ID */ + u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id); + /* request/transaction ids for VMBus */ struct vmbus_requestor requestor; u32 rqstor_size; + + /* The max size of a packet on this channel */ + u32 max_pkt_size; }; -u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr); -u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id); +u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr); +u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id); static inline bool is_hvsock_channel(const struct vmbus_channel *c) { @@ -1662,15 +1680,44 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) } +struct vmpacket_descriptor * +hv_pkt_iter_first_raw(struct vmbus_channel *channel); + struct vmpacket_descriptor * hv_pkt_iter_first(struct vmbus_channel *channel); struct vmpacket_descriptor * __hv_pkt_iter_next(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt); + const struct vmpacket_descriptor *pkt, + bool copy); void hv_pkt_iter_close(struct 
vmbus_channel *channel); +static inline struct vmpacket_descriptor * +hv_pkt_iter_next_pkt(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt, + bool copy) +{ + struct vmpacket_descriptor *nxt; + + nxt = __hv_pkt_iter_next(channel, pkt, copy); + if (!nxt) + hv_pkt_iter_close(channel); + + return nxt; +} + +/* + * Get next packet descriptor without copying it out of the ring buffer + * If at end of list, return NULL and update host. + */ +static inline struct vmpacket_descriptor * +hv_pkt_iter_next_raw(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt) +{ + return hv_pkt_iter_next_pkt(channel, pkt, false); +} + /* * Get next packet descriptor from iterator * If at end of list, return NULL and update host. @@ -1679,13 +1726,7 @@ static inline struct vmpacket_descriptor * hv_pkt_iter_next(struct vmbus_channel *channel, const struct vmpacket_descriptor *pkt) { - struct vmpacket_descriptor *nxt; - - nxt = __hv_pkt_iter_next(channel, pkt); - if (!nxt) - hv_pkt_iter_close(channel); - - return nxt; + return hv_pkt_iter_next_pkt(channel, pkt, true); } #define foreach_vmbus_pkt(pkt, channel) \ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e8f2ac8c9c..3eb60a2e9e 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -15,6 +15,7 @@ #include /* for struct device */ #include /* for completion */ #include +#include #include #include /* for Host Notify IRQ */ #include /* for struct device_node */ @@ -147,6 +148,7 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, /* Now follow the 'nice' access routines. These also document the calling conventions of i2c_smbus_xfer. */ +u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count); s32 i2c_smbus_read_byte(const struct i2c_client *client); s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command); @@ -343,7 +345,6 @@ struct i2c_client { }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) -struct i2c_client *i2c_verify_client(struct device *dev); struct i2c_adapter *i2c_verify_adapter(struct device *dev); const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client); @@ -477,6 +478,13 @@ i2c_new_ancillary_device(struct i2c_client *client, u16 default_addr); void i2c_unregister_device(struct i2c_client *client); + +struct i2c_client *i2c_verify_client(struct device *dev); +#else +static inline struct i2c_client *i2c_verify_client(struct device *dev) +{ + return NULL; +} #endif /* I2C */ /* Mainboard arch_initcall() code should register all its I2C devices. 
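The i2c.h hunk above moves the i2c_verify_client() declaration under the CONFIG_I2C guard and pairs it with a NULL-returning static inline stub, so callers still build when I2C support is compiled out and can simply test the return value instead of wrapping every call site in #ifdef. A minimal, self-contained sketch of that stub pattern in plain C (the demo_* names and the DEMO_I2C macro are hypothetical stand-ins, not kernel API):

#include <stdio.h>

struct device { const char *name; };
struct i2c_client { struct device dev; };	/* dev is the first member */

#ifdef DEMO_I2C
/* Enabled variant: recover the client from its embedded device. The
 * kernel checks dev->type and uses to_i2c_client(); the cast below only
 * works because dev is the first member of struct i2c_client. */
static struct i2c_client *demo_verify_client(struct device *dev)
{
	return (struct i2c_client *)dev;
}
#else
/* Subsystem compiled out: the stub keeps callers building and lets them
 * handle the NULL result at run time, with no #ifdef at the call site. */
static struct i2c_client *demo_verify_client(struct device *dev)
{
	(void)dev;
	return NULL;
}
#endif

int main(void)
{
	struct i2c_client client = { .dev = { .name = "demo-client" } };
	struct i2c_client *c = demo_verify_client(&client.dev);

	printf("%s\n", c ? c->dev.name : "(I2C support disabled)");
	return 0;
}

Build with -DDEMO_I2C to exercise the enabled path; without it the stub returns NULL, mirroring how the new !I2C stub behaves.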
@@ -729,6 +737,7 @@ struct i2c_adapter { const struct i2c_adapter_quirks *quirks; struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 2967437f1b..a6730072d1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2020 Intel Corporation + * Copyright (c) 2018 - 2021 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -2179,6 +2179,8 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0 +#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01 + /* 802.11ax HE TX/RX MCS NSS Support */ #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) #define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) @@ -2933,6 +2935,7 @@ enum ieee80211_category { WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, WLAN_CATEGORY_RADIO_MEASUREMENT = 5, + WLAN_CATEGORY_FAST_BBS_TRANSITION = 6, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, @@ -3110,6 +3113,11 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6) +/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte + * of the @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7) + /* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index bf5c5f32c6..b712217f70 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -48,6 +48,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_TUNNEL6: case ARPHRD_SIT: case ARPHRD_IPGRE: + case ARPHRD_IP6GRE: case ARPHRD_VOID: case ARPHRD_NONE: case ARPHRD_RAWIP: diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 2cc35038a8..b651c5e32a 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -67,10 +67,12 @@ int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); +bool br_multicast_has_router_adjacent(struct net_device *dev, int proto); bool br_multicast_enabled(const struct net_device *dev); bool br_multicast_router(const struct net_device *dev); int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, - struct notifier_block *nb, struct netlink_ext_ack *extack); + const void *ctx, bool adding, struct notifier_block *nb, + struct netlink_ext_ack *extack); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -87,6 +89,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, { return false; } + +static inline bool br_multicast_has_router_adjacent(struct net_device *dev, + int proto) +{ + return true; +} + static inline bool br_multicast_enabled(const struct net_device *dev) { return false; @@ -95,9 +104,9 @@ static inline bool br_multicast_router(const struct net_device 
*dev) { return false; } -static inline int br_mdb_replay(struct net_device *br_dev, - struct net_device *dev, - struct notifier_block *nb, +static inline int br_mdb_replay(const struct net_device *br_dev, + const struct net_device *dev, const void *ctx, + bool adding, struct notifier_block *nb, struct netlink_ext_ack *extack) { return -EOPNOTSUPP; @@ -112,7 +121,8 @@ int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, - struct notifier_block *nb, struct netlink_ext_ack *extack); + const void *ctx, bool adding, struct notifier_block *nb, + struct netlink_ext_ack *extack); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -141,8 +151,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, } static inline int br_vlan_replay(struct net_device *br_dev, - struct net_device *dev, - struct notifier_block *nb, + struct net_device *dev, const void *ctx, + bool adding, struct notifier_block *nb, struct netlink_ext_ack *extack) { return -EOPNOTSUPP; @@ -156,9 +166,9 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev, void br_fdb_clear_offload(const struct net_device *dev, u16 vid); bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); u8 br_port_get_stp_state(const struct net_device *dev); -clock_t br_get_ageing_time(struct net_device *br_dev); -int br_fdb_replay(struct net_device *br_dev, struct net_device *dev, - struct notifier_block *nb); +clock_t br_get_ageing_time(const struct net_device *br_dev); +int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev, + const void *ctx, bool adding, struct notifier_block *nb); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -183,14 +193,14 @@ static inline u8 br_port_get_stp_state(const struct net_device *dev) return BR_STATE_DISABLED; } -static inline clock_t br_get_ageing_time(struct net_device *br_dev) +static inline clock_t br_get_ageing_time(const struct net_device *br_dev) { return 0; } -static inline int br_fdb_replay(struct net_device *br_dev, - struct net_device *dev, - struct notifier_block *nb) +static inline int br_fdb_replay(const struct net_device *br_dev, + const struct net_device *dev, const void *ctx, + bool adding, struct notifier_block *nb) { return -EOPNOTSUPP; } diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h index 4efb537f57..10e7521ecb 100644 --- a/include/linux/if_rmnet.h +++ b/include/linux/if_rmnet.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only - * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved. 
*/ #ifndef _LINUX_IF_RMNET_H_ @@ -12,10 +12,12 @@ struct rmnet_map_header { } __aligned(1); /* rmnet_map_header flags field: - * PAD_LEN: number of pad bytes following packet data - * CMD: 1 = packet contains a MAP command; 0 = packet contains data + * PAD_LEN: number of pad bytes following packet data + * CMD: 1 = packet contains a MAP command; 0 = packet contains data + * NEXT_HEADER: 1 = packet contains V5 CSUM header; 0 = no V5 CSUM header */ #define MAP_PAD_LEN_MASK GENMASK(5, 0) +#define MAP_NEXT_HEADER_FLAG BIT(6) #define MAP_CMD_FLAG BIT(7) struct rmnet_map_dl_csum_trailer { @@ -23,7 +25,7 @@ struct rmnet_map_dl_csum_trailer { u8 flags; /* MAP_CSUM_DL_VALID_FLAG */ __be16 csum_start_offset; __be16 csum_length; - __be16 csum_value; + __sum16 csum_value; } __aligned(1); /* rmnet_map_dl_csum_trailer flags field: @@ -45,4 +47,26 @@ struct rmnet_map_ul_csum_header { #define MAP_CSUM_UL_UDP_FLAG BIT(14) #define MAP_CSUM_UL_ENABLED_FLAG BIT(15) +/* MAP CSUM headers */ +struct rmnet_map_v5_csum_header { + u8 header_info; + u8 csum_info; + __be16 reserved; +} __aligned(1); + +/* v5 header_info field + * NEXT_HEADER: represents whether there is any next header + * HEADER_TYPE: represents the type of this header + * + * csum_info field + * CSUM_VALID_OR_REQ: + * 1 = for UL, checksum computation is requested. + * 1 = for DL, the checksum was validated and found valid + */ + +#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0) +#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1) +#define MAPV5_CSUMINFO_VALID_FLAG BIT(7) + +#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2 #endif /* !(_LINUX_IF_RMNET_H_) */ diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 7199280d89..c525fd5165 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -26,6 +26,7 @@ struct ad_sd_calib_data { }; struct ad_sigma_delta; +struct device; struct iio_dev; /** @@ -132,8 +133,7 @@ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev, struct spi_device *spi, const struct ad_sigma_delta_info *info); -int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev); -void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev); +int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev); int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 7ce8a8adad..c582e1a142 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -77,7 +77,7 @@ struct cros_ec_sensors_core_state { u16 scale; } calib[CROS_EC_SENSOR_MAX_AXIS]; s8 sign[CROS_EC_SENSOR_MAX_AXIS]; - u8 samples[CROS_EC_SAMPLE_SIZE]; + u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8); int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 33e9399774..8bdbaf3f37 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,8 @@ #include +#define LSM9DS0_IMU_DEV_NAME "lsm9ds0" + /* * Buffer size max case: 2bytes per channel, 3 channels in total + * 8bytes timestamp channel (s64) */ @@ -46,8 +49,8 @@ #define ST_SENSORS_MAX_NAME 17 #define
ST_SENSORS_MAX_4WAI 8 -#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ - ch2, s, endian, rbits, sbits, addr) \ +#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr, ext) \ { \ .type = device_type, \ .modified = mod, \ @@ -63,8 +66,14 @@ .storagebits = sbits, \ .endianness = endian, \ }, \ + .ext_info = ext, \ } +#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr) \ + ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr, NULL) + #define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \ st_sensors_sysfs_sampling_frequency_avail) @@ -213,6 +222,7 @@ struct st_sensor_settings { * struct st_sensor_data - ST sensor device status * @dev: Pointer to instance of struct device (I2C or SPI). * @trig: The trigger in use by the core driver. + * @mount_matrix: The mounting matrix of the sensor. * @sensor_settings: Pointer to the specific sensor settings in use. * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply @@ -232,7 +242,7 @@ struct st_sensor_settings { struct st_sensor_data { struct device *dev; struct iio_trigger *trig; - struct iio_mount_matrix *mount_matrix; + struct iio_mount_matrix mount_matrix; struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; @@ -317,4 +327,24 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, void st_sensors_dev_name_probe(struct device *dev, char *name, int len); +/* Accelerometer */ +const struct st_sensor_settings *st_accel_get_settings(const char *name); +int st_accel_common_probe(struct iio_dev *indio_dev); +void st_accel_common_remove(struct iio_dev *indio_dev); + +/* Gyroscope */ +const struct st_sensor_settings *st_gyro_get_settings(const char *name); +int st_gyro_common_probe(struct iio_dev *indio_dev); +void st_gyro_common_remove(struct iio_dev *indio_dev); + +/* Magnetometer */ +const struct st_sensor_settings *st_magn_get_settings(const char *name); +int st_magn_common_probe(struct iio_dev *indio_dev); +void st_magn_common_remove(struct iio_dev *indio_dev); + +/* Pressure */ +const struct st_sensor_settings *st_press_get_settings(const char *name); +int st_press_common_probe(struct iio_dev *indio_dev); +void st_press_common_remove(struct iio_dev *indio_dev); + #endif /* ST_SENSORS_H */ diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 32addd5e79..c9504e9da5 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -6,6 +6,10 @@ /** * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information + * @id: used to identify device internally + * @driver_module: used to make it harder to undercut users + * @info_exist_lock: lock to prevent use during removal + * @trig_readonly: mark the current trigger immutable * @event_interface: event chrdevs associated with interrupt lines * @attached_buffers: array of buffers statically attached by the driver * @attached_buffers_cnt: number of buffers in the array of statically attached buffers @@ -19,6 +23,10 @@ * @groupcounter: index of next attribute group * @legacy_scan_el_group: attribute group for legacy scan elements attribute group * @legacy_buffer_group: attribute group for legacy buffer attributes group + * @scan_index_timestamp: cache of the index to the timestamp + * @clock_id: 
timestamping clock posix identifier + * @chrdev: associated character device + * @flags: file ops related flags including busy flag. * @debugfs_dentry: device specific debugfs dentry * @cached_reg_addr: cached register address for debugfs reads * @read_buf: read buffer to be used for the initial reg read @@ -26,6 +34,10 @@ */ struct iio_dev_opaque { struct iio_dev indio_dev; + int id; + struct module *driver_module; + struct mutex info_exist_lock; + bool trig_readonly; struct iio_event_interface *event_interface; struct iio_buffer **attached_buffers; unsigned int attached_buffers_cnt; @@ -38,6 +50,12 @@ struct iio_dev_opaque { int groupcounter; struct attribute_group legacy_scan_el_group; struct attribute_group legacy_buffer_group; + + unsigned int scan_index_timestamp; + clockid_t clock_id; + struct cdev chrdev; + unsigned long flags; + #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; @@ -46,7 +64,7 @@ struct iio_dev_opaque { #endif }; -#define to_iio_dev_opaque(indio_dev) \ - container_of(indio_dev, struct iio_dev_opaque, indio_dev) +#define to_iio_dev_opaque(_indio_dev) \ + container_of((_indio_dev), struct iio_dev_opaque, indio_dev) #endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f2d65e2e88..324561b7a5 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -127,8 +127,7 @@ struct iio_mount_matrix { ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf); -int iio_read_mount_matrix(struct device *dev, const char *propname, - struct iio_mount_matrix *matrix); +int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix); typedef const struct iio_mount_matrix * (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, @@ -488,8 +487,6 @@ struct iio_buffer_setup_ops { /** * struct iio_dev - industrial I/O device - * @id: [INTERN] used to identify device internally - * @driver_module: [INTERN] used to make it harder to undercut users * @modes: [DRIVER] operating modes supported by device * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent @@ -503,9 +500,7 @@ struct iio_buffer_setup_ops { * channels * @active_scan_mask: [INTERN] union of all scan masks requested by buffers * @scan_timestamp: [INTERN] set if any buffers have requested timestamp - * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) - * @trig_readonly: [INTERN] mark the current trigger immutable * @pollfunc: [DRIVER] function run on trigger being received * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table @@ -513,19 +508,12 @@ struct iio_buffer_setup_ops { * @name: [DRIVER] name of the device. * @label: [DRIVER] unique name to identify which device this is * @info: [DRIVER] callbacks and constant info from driver - * @clock_id: [INTERN] timestamping clock posix identifier - * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable - * @chrdev: [INTERN] associated character device - * @flags: [INTERN] file ops related flags including busy flag. 
* @priv: [DRIVER] reference to driver's private information * **MUST** be accessed **ONLY** via iio_priv() helper */ struct iio_dev { - int id; - struct module *driver_module; - int modes; int currentmode; struct device dev; @@ -538,9 +526,7 @@ struct iio_dev { unsigned masklength; const unsigned long *active_scan_mask; bool scan_timestamp; - unsigned scan_index_timestamp; struct iio_trigger *trig; - bool trig_readonly; struct iio_poll_func *pollfunc; struct iio_poll_func *pollfunc_event; @@ -550,15 +536,13 @@ struct iio_dev { const char *name; const char *label; const struct iio_info *info; - clockid_t clock_id; - struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; - struct cdev chrdev; - unsigned long flags; void *priv; }; +int iio_device_id(struct iio_dev *indio_dev); + const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si); /** @@ -602,15 +586,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev) put_device(&indio_dev->dev); } -/** - * iio_device_get_clock() - Retrieve current timestamping clock for the device - * @indio_dev: IIO device structure containing the device - */ -static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) -{ - return indio_dev->clock_id; -} - +clockid_t iio_device_get_clock(const struct iio_dev *indio_dev); int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id); /** diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index f9b728d490..cf49997d59 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -55,6 +55,7 @@ struct adis_timeout { * this should be the minimum size supported by the device. * @burst_max_len: Holds the maximum burst size when the device supports * more than one burst mode with different sizes + * @burst_max_speed_hz: Maximum spi speed that can be used in burst mode */ struct adis_data { unsigned int read_delay; @@ -83,6 +84,7 @@ struct adis_data { unsigned int burst_reg_cmd; unsigned int burst_len; unsigned int burst_max_len; + unsigned int burst_max_speed_hz; }; /** diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h index 77582ae174..ee1d44545f 100644 --- a/include/linux/input/cy8ctmg110_pdata.h +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -5,7 +5,6 @@ struct cy8ctmg110_pdata { int reset_pin; /* Reset pin is wired to this GPIO (optional) */ - int irq_pin; /* IRQ pin is wired to this GPIO */ }; #endif diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h index 93e2ad67fc..fa2cd8c63d 100644 --- a/include/linux/instrumentation.h +++ b/include/linux/instrumentation.h @@ -4,13 +4,16 @@ #if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION) +#include + /* Begin/end of an instrumentation safe region */ -#define instrumentation_begin() ({ \ - asm volatile("%c0: nop\n\t" \ +#define __instrumentation_begin(c) ({ \ + asm volatile(__stringify(c) ": nop\n\t" \ ".pushsection .discard.instr_begin\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) +#define instrumentation_begin() __instrumentation_begin(__COUNTER__) /* * Because instrumentation_{begin,end}() can nest, objtool validation considers @@ -43,12 +46,13 @@ * To avoid this, have _end() be a NOP instruction, this ensures it will be * part of the condition block and does not escape. 
*/ -#define instrumentation_end() ({ \ - asm volatile("%c0: nop\n\t" \ +#define __instrumentation_end(c) ({ \ + asm volatile(__stringify(c) ": nop\n\t" \ ".pushsection .discard.instr_end\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) +#define instrumentation_end() __instrumentation_end(__COUNTER__) #else # define instrumentation_begin() do { } while(0) # define instrumentation_end() do { } while(0) diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 2271939c5c..2ea0f2f65a 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -13,6 +13,7 @@ enum integrity_status { INTEGRITY_PASS = 0, INTEGRITY_PASS_IMMUTABLE, INTEGRITY_FAIL, + INTEGRITY_FAIL_IMMUTABLE, INTEGRITY_NOLABEL, INTEGRITY_NOXATTRS, INTEGRITY_UNKNOWN, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 03faf20a68..d0fa0b3199 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -537,7 +537,7 @@ struct context_entry { struct dmar_domain { int nid; /* node id */ - unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED]; + unsigned int iommu_refcnt[DMAR_UNITS_SUPPORTED]; /* Refcount of devices per iommu */ @@ -546,7 +546,10 @@ struct dmar_domain { * domain ids are 16 bit wide according * to VT-d spec, section 9.3 */ - bool has_iotlb_device; + u8 has_iotlb_device: 1; + u8 iommu_coherency: 1; /* indicate coherency of iommu access */ + u8 iommu_snooping: 1; /* indicate snooping control feature */ + struct list_head devices; /* all devices' list */ struct list_head subdevices; /* all subdevices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ @@ -558,10 +561,6 @@ struct dmar_domain { int agaw; int flags; /* flags to find out type of domain */ - - int iommu_coherency;/* indicate coherency of iommu access */ - int iommu_snooping; /* indicate snooping control feature*/ - int iommu_count; /* reference count of iommu */ int iommu_superpage;/* Level of superpages supported: 0 == 4KiB (no superpages), 1 == 2MiB, 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ @@ -606,6 +605,8 @@ struct intel_iommu { struct completion prq_complete; struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */ #endif + struct iopf_queue *iopf_queue; + unsigned char iopfq_name[16]; struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -619,6 +620,7 @@ struct intel_iommu { u32 flags; /* Software defined flags */ struct dmar_drhd_unit *drhd; + void *perf_statistic; }; /* Per subdevice private data */ @@ -776,6 +778,7 @@ struct intel_svm_dev { struct device *dev; struct intel_iommu *iommu; struct iommu_sva sva; + unsigned long prq_seq_number; u32 pasid; int users; u16 did; @@ -791,7 +794,6 @@ struct intel_svm { u32 pasid; int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; - struct list_head list; }; #else static inline void intel_svm_check(struct intel_iommu *iommu) {} @@ -827,4 +829,32 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) #define intel_iommu_enabled (0) #endif +static inline const char *decode_prq_descriptor(char *str, size_t size, + u64 dw0, u64 dw1, u64 dw2, u64 dw3) +{ + char *buf = str; + int bytes; + + bytes = snprintf(buf, size, + "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx", + FIELD_GET(GENMASK_ULL(31, 16), dw0), + FIELD_GET(GENMASK_ULL(63, 12), dw1), + dw1 & BIT_ULL(0) ? 
'r' : '-', + dw1 & BIT_ULL(1) ? 'w' : '-', + dw0 & BIT_ULL(52) ? 'x' : '-', + dw0 & BIT_ULL(53) ? 'p' : '-', + dw1 & BIT_ULL(2) ? 'l' : '-', + FIELD_GET(GENMASK_ULL(51, 32), dw0), + FIELD_GET(GENMASK_ULL(11, 3), dw1)); + + /* Private Data */ + if (dw0 & BIT_ULL(9)) { + size -= bytes; + buf += bytes; + snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3); + } + + return str; +} + #endif diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 0d6b4bc191..25e2b4e805 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -8,11 +8,17 @@ #ifndef _INTEL_ISH_CLIENT_IF_H_ #define _INTEL_ISH_CLIENT_IF_H_ +#include +#include + struct ishtp_cl_device; struct ishtp_device; struct ishtp_cl; struct ishtp_fw_client; +typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev, + const char *format, ...); + /* Client state */ enum cl_state { ISHTP_CL_INITIALIZING = 0, @@ -36,7 +42,7 @@ struct ishtp_cl_driver { const char *name; const guid_t *guid; int (*probe)(struct ishtp_cl_device *dev); - int (*remove)(struct ishtp_cl_device *dev); + void (*remove)(struct ishtp_cl_device *dev); int (*reset)(struct ishtp_cl_device *dev); const struct dev_pm_ops *pm; }; @@ -76,7 +82,7 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device, /* Get the device * from ishtp device instance */ struct device *ishtp_device(struct ishtp_cl_device *cl_device); /* Trace interface for clients */ -void *ishtp_trace_callback(struct ishtp_cl_device *cl_device); +ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device); /* Get device pointer of PCI device for DMA access */ struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4777850a6d..2ed65b01c9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -64,6 +64,8 @@ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. * Users will enable it explicitly by enable_irq() or enable_nmi() * later. + * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers, + * depends on IRQF_PERCPU. */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -78,6 +80,7 @@ #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 #define IRQF_NO_AUTOEN 0x00080000 +#define IRQF_NO_DEBUG 0x00100000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -319,39 +322,8 @@ struct irq_affinity_desc { extern cpumask_var_t irq_default_affinity; -/* Internal implementation. Use the helpers below */ -extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, - bool force); - -/** - * irq_set_affinity - Set the irq affinity of a given irq - * @irq: Interrupt to set affinity - * @cpumask: cpumask - * - * Fails if cpumask does not contain an online CPU - */ -static inline int -irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) -{ - return __irq_set_affinity(irq, cpumask, false); -} - -/** - * irq_force_affinity - Force the irq affinity of a given irq - * @irq: Interrupt to set affinity - * @cpumask: cpumask - * - * Same as irq_set_affinity, but without checking the mask against - * online cpus. - * - * Solely for low level cpu hotplug code, where we need to make per - * cpu interrupts affine before the cpu becomes online.
- */ -static inline int -irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) -{ - return __irq_set_affinity(irq, cpumask, true); -} +extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); +extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask); extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index c87d0cb0de..479c1da3e2 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -159,7 +159,6 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); -int iomap_set_page_dirty(struct page *page); int iomap_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); diff --git a/include/linux/irq.h b/include/linux/irq.h index 31b347c9f8..8e9a9ae471 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,7 @@ enum irqchip_irq_state; * mechanism and from core side polling. * IRQ_DISABLE_UNLAZY - Disable lazy irq disable * IRQ_HIDDEN - Don't show up in /proc/interrupts + * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -99,6 +100,7 @@ enum { IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), IRQ_HIDDEN = (1 << 20), + IRQ_NO_DEBUG = (1 << 21), }; #define IRQF_MODIFY_MASK \ diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index fa8c0455c3..1177f3a1ae 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -7,8 +7,7 @@ #ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H #define __LINUX_IRQCHIP_ARM_GIC_COMMON_H -#include -#include +#include #define GICD_INT_DEF_PRI 0xa0 #define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ @@ -16,28 +15,6 @@ (GICD_INT_DEF_PRI << 8) |\ GICD_INT_DEF_PRI) -enum gic_type { - GIC_V2, - GIC_V3, -}; - -struct gic_kvm_info { - /* GIC type */ - enum gic_type type; - /* Virtual CPU interface */ - struct resource vcpu; - /* Interrupt number */ - unsigned int maint_irq; - /* Virtual control interface */ - struct resource vctrl; - /* vlpi support */ - bool has_v4; - /* rvpeid support */ - bool has_v4_1; -}; - -const struct gic_kvm_info *gic_get_kvm_info(void); - struct irq_domain; struct fwnode_handle; int gicv2m_init(struct fwnode_handle *parent_handle, diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h new file mode 100644 index 0000000000..a75b2c7de6 --- /dev/null +++ b/include/linux/irqchip/arm-vgic-info.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/linux/irqchip/arm-vgic-info.h + * + * Copyright (C) 2016 ARM Limited, All Rights Reserved. 
+ */ +#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H +#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H + +#include +#include + +enum gic_type { + /* Full GICv2 */ + GIC_V2, + /* Full GICv3, optionally with v2 compat */ + GIC_V3, +}; + +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* No interrupt mask, no need to use the above field */ + bool no_maint_irq_mask; + /* Virtual control interface */ + struct resource vctrl; + /* vlpi support */ + bool has_v4; + /* rvpeid support */ + bool has_v4_1; + /* Deactivation impaired, subpar stuff */ + bool no_hw_deactivation; +}; + +#ifdef CONFIG_KVM +void vgic_set_kvm_info(const struct gic_kvm_info *info); +#else +static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {} +#endif + +#endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 891b323266..59aea39785 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -32,7 +32,7 @@ struct pt_regs; * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @threads_handled: stats field for deferred spurious detection of threaded handlers - * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes @@ -158,25 +158,21 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc) desc->handle_irq(desc); } +int handle_irq_desc(struct irq_desc *desc); int generic_handle_irq(unsigned int irq); -#ifdef CONFIG_HANDLE_DOMAIN_IRQ +#ifdef CONFIG_IRQ_DOMAIN /* * Convert a HW interrupt number to a logical one using an IRQ domain, * and handle the resulting interrupt number. Return -EINVAL if - * conversion failed. Providing a NULL domain indicates that the - * conversion has already been done. + * conversion failed.
*/ -int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, - bool lookup, struct pt_regs *regs); +int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq); -static inline int handle_domain_irq(struct irq_domain *domain, - unsigned int hwirq, struct pt_regs *regs) -{ - return __handle_domain_irq(domain, hwirq, true, regs); -} +#ifdef CONFIG_HANDLE_DOMAIN_IRQ +int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs); -#ifdef CONFIG_IRQ_DOMAIN int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs); #endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 62a8e3d238..23e4ee5235 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -41,13 +41,11 @@ struct fwnode_handle; struct irq_domain; struct irq_chip; struct irq_data; +struct irq_desc; struct cpumask; struct seq_file; struct irq_affinity_desc; -/* Number of irqs reserved for a legacy isa controller */ -#define NUM_ISA_INTERRUPTS 16 - #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 /** @@ -152,11 +150,10 @@ struct irq_domain_chip_generic; * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain - * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that - * support direct mapping - * @revmap_size: Size of the linear map table @linear_revmap[] + * @revmap_size: Size of the linear map table @revmap[] * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map - * @linear_revmap: Linear table of hwirq->virq reverse mappings + * @revmap_mutex: Lock for the revmap + * @revmap: Linear table of irq_data pointers */ struct irq_domain { struct list_head link; @@ -176,11 +173,10 @@ struct irq_domain { /* reverse map data. 
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; - unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; - struct mutex revmap_tree_mutex; - unsigned int linear_revmap[]; + struct mutex revmap_mutex; + struct irq_data __rcu *revmap[]; }; /* Irq domain flags */ @@ -210,6 +206,9 @@ enum { */ IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), + /* Irq domain doesn't translate anything */ + IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -348,6 +347,8 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no { return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); } + +#ifdef CONFIG_IRQ_DOMAIN_NOMAP static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, @@ -355,14 +356,10 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod { return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); } -static inline struct irq_domain *irq_domain_add_legacy_isa( - struct device_node *of_node, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, - host_data); -} + +extern unsigned int irq_create_direct_mapping(struct irq_domain *host); +#endif + static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) @@ -405,25 +402,37 @@ static inline unsigned int irq_create_mapping(struct irq_domain *host, return irq_create_mapping_affinity(host, hwirq, NULL); } +extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq, + unsigned int *irq); + +static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq) +{ + return __irq_resolve_mapping(domain, hwirq, NULL); +} /** - * irq_linear_revmap() - Find a linux irq from a hw irq number. + * irq_find_mapping() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space - * - * This is a fast path alternative to irq_find_mapping() that can be - * called directly by irq controller code to save a handful of - * instructions. It is always safe to call, but won't find irqs mapped - * using the radix tree. */ +static inline unsigned int irq_find_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq) +{ + unsigned int irq; + + if (__irq_resolve_mapping(domain, hwirq, &irq)) + return irq; + + return 0; +} + static inline unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { - return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; + return irq_find_mapping(domain, hwirq); } -extern unsigned int irq_find_mapping(struct irq_domain *host, - irq_hw_number_t hwirq); -extern unsigned int irq_create_direct_mapping(struct irq_domain *host); extern const struct irq_domain_ops irq_domain_simple_ops; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index db0e1920cb..fd933c4528 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -779,6 +779,11 @@ struct journal_s */ unsigned long j_flags; + /** + * @j_atomic_flags: Atomic journaling state flags. 
+ */ + unsigned long j_atomic_flags; + /** * @j_errno: * @@ -904,6 +909,29 @@ struct journal_s */ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; + /** + * @j_shrinker: + * + * Journal head shrinker; reclaims the journal head of buffers + * that have been written back. + */ + struct shrinker j_shrinker; + + /** + * @j_checkpoint_jh_count: + * + * Number of journal buffers on the checkpoint list. [j_list_lock] + */ + struct percpu_counter j_checkpoint_jh_count; + + /** + * @j_shrink_transaction: + * + * Records the next transaction to shrink on the checkpoint list. + * [j_list_lock] + */ + transaction_t *j_shrink_transaction; + /** * @j_head: * @@ -1370,6 +1398,16 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) * mode */ #define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */ #define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */ +#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001 +#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002 +#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \ + JBD2_JOURNAL_FLUSH_ZEROOUT) + +/* + * Journal atomic flag definitions + */ +#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing + * buffer back to disk */ /* * Function declarations for the journaling transaction and buffer @@ -1407,6 +1445,7 @@ extern void jbd2_journal_commit_transaction(journal_t *); /* Checkpoint list management */ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); +unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan); int __jbd2_journal_remove_checkpoint(struct journal_head *); void jbd2_journal_destroy_checkpoint(journal_t *journal); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); @@ -1500,7 +1539,7 @@ extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); -extern int jbd2_journal_flush (journal_t *); +extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); extern void jbd2_journal_lock_updates (journal_t *); extern void jbd2_journal_unlock_updates (journal_t *); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 05f5554d86..48b9b2a827 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -171,9 +171,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry) return (unsigned long)entry->key & 2UL; } -static inline void jump_entry_set_init(struct jump_entry *entry) +static inline void jump_entry_set_init(struct jump_entry *entry, bool set) { - entry->key |= 2; + if (set) + entry->key |= 2; + else + entry->key &= ~2; +} + +static inline int jump_entry_size(struct jump_entry *entry) +{ +#ifdef JUMP_LABEL_NOP_SIZE + return JUMP_LABEL_NOP_SIZE; +#else + return arch_jump_entry_size(entry); +#endif } #endif diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 465060acc9..a1d6fc82d7 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -7,6 +7,7 @@ #define _LINUX_KALLSYMS_H #include +#include #include #include #include @@ -15,8 +16,10 @@ #include #define KSYM_NAME_LEN 128 -#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ - 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \ + (KSYM_NAME_LEN - 1) + \ + 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \ + (BUILD_ID_SIZE_MAX * 2) + 1) struct
cred; struct module; @@ -91,8 +94,10 @@ const char *kallsyms_lookup(unsigned long addr, /* Look up a kernel symbol and return it in a text buffer. */ extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_symbol_build_id(char *buffer, unsigned long address); extern int sprint_symbol_no_offset(char *buffer, unsigned long address); extern int sprint_backtrace(char *buffer, unsigned long address); +extern int sprint_backtrace_build_id(char *buffer, unsigned long address); int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -128,6 +133,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr) return 0; } +static inline int sprint_symbol_build_id(char *buffer, unsigned long address) +{ + *buffer = '\0'; + return 0; +} + static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr) { *buffer = '\0'; @@ -140,6 +151,12 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr) return 0; } +static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + static inline int lookup_symbol_name(unsigned long addr, char *symname) { return -ERANGE; diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b1678a61e6..dd874a1ee8 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -2,6 +2,8 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H +#include +#include #include #include @@ -17,7 +19,6 @@ struct task_struct; /* kasan_data struct is used in KUnit tests for KASAN expected failures */ struct kunit_kasan_expectation { - bool report_expected; bool report_found; }; @@ -41,9 +42,9 @@ struct kunit_kasan_expectation { #endif extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; -extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]; -extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; -extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD]; +extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]; +extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; +extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; int kasan_populate_early_shadow(const void *shadow_start, @@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {} #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -#ifdef CONFIG_KASAN - -struct kasan_cache { - int alloc_meta_offset; - int free_meta_offset; - bool is_kmalloc; -}; - #ifdef CONFIG_KASAN_HW_TAGS DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); @@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void) return kasan_enabled(); } +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); +void kasan_free_pages(struct page *page, unsigned int order); + #else /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_enabled(void) { - return true; + return IS_ENABLED(CONFIG_KASAN); } static inline bool kasan_has_integrated_init(void) @@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void) return false; } +static __always_inline void kasan_alloc_pages(struct page *page, + unsigned int order, gfp_t flags) +{ + /* Only available for integrated init. */ + BUILD_BUG(); +} + +static __always_inline void kasan_free_pages(struct page *page, + unsigned int order) +{ + /* Only available for integrated init. 
*/ + BUILD_BUG(); +} + #endif /* CONFIG_KASAN_HW_TAGS */ +#ifdef CONFIG_KASAN + +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; + bool is_kmalloc; +}; + slab_flags_t __kasan_never_merge(void); static __always_inline slab_flags_t kasan_never_merge(void) { @@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size) __kasan_unpoison_range(addr, size); } -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init); -static __always_inline void kasan_alloc_pages(struct page *page, +void __kasan_poison_pages(struct page *page, unsigned int order, bool init); +static __always_inline void kasan_poison_pages(struct page *page, unsigned int order, bool init) { if (kasan_enabled()) - __kasan_alloc_pages(page, order, init); + __kasan_poison_pages(page, order, init); } -void __kasan_free_pages(struct page *page, unsigned int order, bool init); -static __always_inline void kasan_free_pages(struct page *page, - unsigned int order, bool init) +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init); +static __always_inline void kasan_unpoison_pages(struct page *page, + unsigned int order, bool init) { if (kasan_enabled()) - __kasan_free_pages(page, order, init); + __kasan_unpoison_pages(page, order, init); } void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, @@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ -static inline bool kasan_enabled(void) -{ - return false; -} -static inline bool kasan_has_integrated_init(void) -{ - return false; -} static inline slab_flags_t kasan_never_merge(void) { return 0; } static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {} -static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {} +static inline void kasan_poison_pages(struct page *page, unsigned int order, + bool init) {} +static inline void kasan_unpoison_pages(struct page *page, unsigned int order, + bool init) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index cc8fa109cf..20d1079e92 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -51,7 +51,8 @@ /* * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 - * otherwise. + * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in + * autoconf.h. */ #define IS_MODULE(option) __is_defined(option##_MODULE) @@ -66,7 +67,8 @@ /* * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', - * 0 otherwise. + * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in + * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1". 
*/ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) diff --git a/include/linux/kcore.h b/include/linux/kcore.h index da676cdbd7..86c0f1d189 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h @@ -11,14 +11,11 @@ enum kcore_type { KCORE_RAM, KCORE_VMEMMAP, KCORE_USER, - KCORE_OTHER, - KCORE_REMAP, }; struct kcore_list { struct list_head list; unsigned long addr; - unsigned long vaddr; size_t size; int type; }; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 15d8bad3d2..1b2f0a7e00 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -10,10 +10,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -71,8 +73,19 @@ */ #define lower_32_bits(n) ((u32)((n) & 0xffffffff)) +/** + * upper_16_bits - return bits 16-31 of a number + * @n: the number we're accessing + */ +#define upper_16_bits(n) ((u16)((n) >> 16)) + +/** + * lower_16_bits - return bits 0-15 of a number + * @n: the number we're accessing + */ +#define lower_16_bits(n) ((u16)((n) & 0xffff)) + struct completion; -struct pt_regs; struct user; #ifdef CONFIG_PREEMPT_VOLUNTARY @@ -177,159 +190,9 @@ void __might_fault(const char *file, int line); static inline void might_fault(void) { } #endif -extern struct atomic_notifier_head panic_notifier_list; -extern long (*panic_blink)(int state); -__printf(1, 2) -void panic(const char *fmt, ...) __noreturn __cold; -void nmi_panic(struct pt_regs *regs, const char *msg); -extern void oops_enter(void); -extern void oops_exit(void); -extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; -/* Internal, do not use. */ -int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); -int __must_check _kstrtol(const char *s, unsigned int base, long *res); - -int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); -int __must_check kstrtoll(const char *s, unsigned int base, long long *res); - -/** - * kstrtoul - convert a string to an unsigned long - * @s: The start of the string. The string must be null-terminated, and may also - * include a single newline before its terminating null. The first character - * may also be a plus sign, but not a minus sign. - * @base: The number base to use. The maximum supported base is 16. If base is - * given as 0, then the base of the string is automatically detected with the - * conventional semantics - If it begins with 0x the number will be parsed as a - * hexadecimal (case insensitive), if it otherwise begins with 0, it will be - * parsed as an octal number. Otherwise it will be parsed as a decimal. - * @res: Where to write the result of the conversion on success. - * - * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Preferred over simple_strtoul(). Return code must be checked. -*/ -static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) -{ - /* - * We want to shortcut function call, but - * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. - */ - if (sizeof(unsigned long) == sizeof(unsigned long long) && - __alignof__(unsigned long) == __alignof__(unsigned long long)) - return kstrtoull(s, base, (unsigned long long *)res); - else - return _kstrtoul(s, base, res); -} - -/** - * kstrtol - convert a string to a long - * @s: The start of the string. 
The string must be null-terminated, and may also - * include a single newline before its terminating null. The first character - * may also be a plus sign or a minus sign. - * @base: The number base to use. The maximum supported base is 16. If base is - * given as 0, then the base of the string is automatically detected with the - * conventional semantics - If it begins with 0x the number will be parsed as a - * hexadecimal (case insensitive), if it otherwise begins with 0, it will be - * parsed as an octal number. Otherwise it will be parsed as a decimal. - * @res: Where to write the result of the conversion on success. - * - * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Preferred over simple_strtol(). Return code must be checked. - */ -static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) -{ - /* - * We want to shortcut function call, but - * __builtin_types_compatible_p(long, long long) = 0. - */ - if (sizeof(long) == sizeof(long long) && - __alignof__(long) == __alignof__(long long)) - return kstrtoll(s, base, (long long *)res); - else - return _kstrtol(s, base, res); -} - -int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); -int __must_check kstrtoint(const char *s, unsigned int base, int *res); - -static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) -{ - return kstrtoull(s, base, res); -} - -static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) -{ - return kstrtoll(s, base, res); -} - -static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) -{ - return kstrtouint(s, base, res); -} - -static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) -{ - return kstrtoint(s, base, res); -} - -int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); -int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); -int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); -int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); -int __must_check kstrtobool(const char *s, bool *res); - -int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); -int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); -int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); -int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); -int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); -int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); -int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); -int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); -int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); -int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); -int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); - -static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) -{ - return kstrtoull_from_user(s, count, base, res); -} - -static inline int __must_check 
kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) -{ - return kstrtoll_from_user(s, count, base, res); -} - -static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) -{ - return kstrtouint_from_user(s, count, base, res); -} - -static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) -{ - return kstrtoint_from_user(s, count, base, res); -} - -/* - * Use kstrto instead. - * - * NOTE: simple_strto does not check for the range overflow and, - * depending on the input, may give interesting results. - * - * Use these functions if and only if you cannot use kstrto, because - * the conversion ends on the first non-digit character, which may be far - * beyond the supported range. It might be useful to parse the strings like - * 10x50 or 12:21 without altering original string or temporary buffer in use. - * Keep in mind above caveat. - */ - -extern unsigned long simple_strtoul(const char *,char **,unsigned int); -extern long simple_strtol(const char *,char **,unsigned int); -extern unsigned long long simple_strtoull(const char *,char **,unsigned int); -extern long long simple_strtoll(const char *,char **,unsigned int); - extern int num_to_str(char *buf, int size, unsigned long long num, unsigned int width); @@ -357,6 +220,8 @@ int sscanf(const char *, const char *, ...); extern __scanf(2, 0) int vsscanf(const char *, const char *, va_list); +extern int no_hash_pointers_enable(char *str); + extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); @@ -370,52 +235,8 @@ extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); -#ifdef CONFIG_SMP -extern unsigned int sysctl_oops_all_cpu_backtrace; -#else -#define sysctl_oops_all_cpu_backtrace 0 -#endif /* CONFIG_SMP */ - extern void bust_spinlocks(int yes); -extern int panic_timeout; -extern unsigned long panic_print; -extern int panic_on_oops; -extern int panic_on_unrecovered_nmi; -extern int panic_on_io_nmi; -extern int panic_on_warn; -extern unsigned long panic_on_taint; -extern bool panic_on_taint_nousertaint; -extern int sysctl_panic_on_rcu_stall; -extern int sysctl_max_rcu_stall_to_panic; -extern int sysctl_panic_on_stackoverflow; -extern bool crash_kexec_post_notifiers; - -/* - * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It - * holds a CPU number which is executing panic() currently. A value of - * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). - */ -extern atomic_t panic_cpu; -#define PANIC_CPU_INVALID -1 - -/* - * Only to be used by arch init code. If the user over-wrote the default - * CONFIG_PANIC_TIMEOUT, honor it. 
- */ -static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) -{ - if (panic_timeout == arch_default_timeout) - panic_timeout = timeout; -} -extern const char *print_tainted(void); -enum lockdep_ok { - LOCKDEP_STILL_OK, - LOCKDEP_NOW_UNRELIABLE -}; -extern void add_taint(unsigned flag, enum lockdep_ok); -extern int test_taint(unsigned flag); -extern unsigned long get_taint(void); extern int root_mountflags; extern bool early_boot_irqs_disabled; @@ -434,36 +255,6 @@ extern enum system_states { SYSTEM_SUSPEND, } system_state; -/* This cannot be an enum because some may be used in assembly source. */ -#define TAINT_PROPRIETARY_MODULE 0 -#define TAINT_FORCED_MODULE 1 -#define TAINT_CPU_OUT_OF_SPEC 2 -#define TAINT_FORCED_RMMOD 3 -#define TAINT_MACHINE_CHECK 4 -#define TAINT_BAD_PAGE 5 -#define TAINT_USER 6 -#define TAINT_DIE 7 -#define TAINT_OVERRIDDEN_ACPI_TABLE 8 -#define TAINT_WARN 9 -#define TAINT_CRAP 10 -#define TAINT_FIRMWARE_WORKAROUND 11 -#define TAINT_OOT_MODULE 12 -#define TAINT_UNSIGNED_MODULE 13 -#define TAINT_SOFTLOCKUP 14 -#define TAINT_LIVEPATCH 15 -#define TAINT_AUX 16 -#define TAINT_RANDSTRUCT 17 -#define TAINT_FLAGS_COUNT 18 -#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) - -struct taint_flag { - char c_true; /* character printed when tainted */ - char c_false; /* character printed when not tainted */ - bool module; /* also show as a per-module taint flag */ -}; - -extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; - extern const char hex_asc[]; #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 392a367094..258cdde8d3 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -105,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); */ /** - * kgdb_arch_init - Perform any architecture specific initalization. + * kgdb_arch_init - Perform any architecture specific initialization. * - * This function will handle the initalization of any architecture + * This function will handle the initialization of any architecture * specific callbacks. */ extern int kgdb_arch_init(void); @@ -229,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt); extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt); /** - * kgdb_arch_late - Perform any architecture specific initalization. + * kgdb_arch_late - Perform any architecture specific initialization. * - * This function will handle the late initalization of any + * This function will handle the late initialization of any * architecture specific callbacks. This is an optional function for * handling things like late initialization of hw breakpoints. The * default implementation does nothing. diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 1883a4a9f1..e4f3bfe087 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -54,8 +54,6 @@ struct kretprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); -typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, - int trapnr); typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, struct pt_regs *); @@ -83,12 +81,6 @@ struct kprobe { /* Called after addr is executed, unless... */ kprobe_post_handler_t post_handler; - /* - * ... called if executing addr causes a fault (eg. page fault). 
- * Return 1 if it handled fault, otherwise kernel will see it. - */ - kprobe_fault_handler_t fault_handler; - /* Saved opcode (which has been replaced with breakpoint) */ kprobe_opcode_t opcode; @@ -407,7 +399,9 @@ int enable_kprobe(struct kprobe *kp); void dump_kprobe(struct kprobe *kp); void *alloc_insn_page(void); -void free_insn_page(void *page); + +void *alloc_optinsn_page(void); +void free_optinsn_page(void *page); int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h new file mode 100644 index 0000000000..529974e22e --- /dev/null +++ b/include/linux/kstrtox.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KSTRTOX_H +#define _LINUX_KSTRTOX_H + +#include +#include + +/* Internal, do not use. */ +int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __must_check _kstrtol(const char *s, unsigned int base, long *res); + +int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __must_check kstrtoll(const char *s, unsigned int base, long long *res); + +/** + * kstrtoul - convert a string to an unsigned long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign, but not a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Preferred over simple_strtoul(). Return code must be checked. +*/ +static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. + */ + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} + +/** + * kstrtol - convert a string to a long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign or a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Preferred over simple_strtol(). Return code must be checked. + */ +static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(long, long long) = 0. 
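+ *
+ * On 64-bit kernels the size and alignment checks below hold, so res
+ * can safely be cast to (long long *) and kstrtoll() is called
+ * directly; on 32-bit kernels they do not, and the out-of-line
+ * _kstrtol() performs the conversion instead.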
+ */ + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __must_check kstrtoint(const char *s, unsigned int base, int *res); + +static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + +static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); +int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); +int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); +int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); +int __must_check kstrtobool(const char *s, bool *res); + +int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); +int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); +int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); +int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); +int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); +int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); +int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); +int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); +int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); +int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); + +static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) +{ + return kstrtoull_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) +{ + return kstrtoll_from_user(s, count, base, res); +} + +static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) +{ + return kstrtouint_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) +{ + return kstrtoint_from_user(s, count, base, res); +} + +/* + * Use kstrto instead. + * + * NOTE: simple_strto does not check for the range overflow and, + * depending on the input, may give interesting results. + * + * Use these functions if and only if you cannot use kstrto, because + * the conversion ends on the first non-digit character, which may be far + * beyond the supported range. It might be useful to parse the strings like + * 10x50 or 12:21 without altering original string or temporary buffer in use. 
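+ *
+ * For example, a "12:21" style string can be parsed with two calls:
+ *
+ *   hh = simple_strtoul(str, &endp, 10);    (endp is left at ":21")
+ *   mm = simple_strtoul(endp + 1, NULL, 10);
+ *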
+ * Keep in mind above caveat. + */ + +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); +extern long long simple_strtoll(const char *,char **,unsigned int); + +static inline int strtobool(const char *s, bool *res) +{ + return kstrtobool(s, res); +} + +#endif /* _LINUX_KSTRTOX_H */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2484ed97e7..346b0f2691 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -18,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), * @threadfn: the function to run in the thread * @data: data pointer for @threadfn() * @namefmt: printf-style format string for the thread name - * @arg...: arguments for @namefmt. + * @arg: arguments for @namefmt. * * This macro will create a kthread on the current node, leaving it in * the stopped state. This is just a helper for kthread_create_on_node(); @@ -33,6 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); +void set_kthread_struct(struct task_struct *p); + void kthread_set_per_cpu(struct task_struct *k, int cpu); bool kthread_is_per_cpu(struct task_struct *k); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8583ed3ff3..ae7735b490 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -304,7 +305,6 @@ struct kvm_vcpu { struct pid __rcu *pid; int sigset_active; sigset_t sigset; - struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; bool valid_wakeup; @@ -341,6 +341,8 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; + struct kvm_vcpu_stat stat; + char stats_id[KVM_STATS_NAME_SIZE]; struct kvm_dirty_ring dirty_ring; }; @@ -523,6 +525,15 @@ struct kvm { #endif /* KVM_HAVE_MMU_RWLOCK */ struct mutex slots_lock; + + /* + * Protects the arch-specific fields of struct kvm_memory_slots in + * use by the VM. To be used under the slots_lock (above) or in a + * kvm->srcu critical section where acquiring the slots_lock would + * lead to deadlock with the synchronize_srcu in + * install_new_memslots. + */ + struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; @@ -585,6 +596,11 @@ struct kvm { pid_t userspace_pid; unsigned int max_halt_poll_ns; u32 dirty_ring_size; + +#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER + struct notifier_block pm_notifier; +#endif + char stats_id[KVM_STATS_NAME_SIZE]; }; #define kvm_err(fmt, ...) \ @@ -998,6 +1014,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); +#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER +int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); +#endif + #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); #endif @@ -1244,26 +1264,104 @@ enum kvm_stat_kind { struct kvm_stat_data { struct kvm *kvm; - struct kvm_stats_debugfs_item *dbgfs_item; -}; - -struct kvm_stats_debugfs_item { - const char *name; - int offset; + const struct _kvm_stats_desc *desc; enum kvm_stat_kind kind; - int mode; }; -#define KVM_DBGFS_GET_MODE(dbgfs_item) \ - ((dbgfs_item)->mode ? 
(dbgfs_item)->mode : 0644) +struct _kvm_stats_desc { + struct kvm_stats_desc desc; + char name[KVM_STATS_NAME_SIZE]; +}; -#define VM_STAT(n, x, ...) \ - { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ } -#define VCPU_STAT(n, x, ...) \ - { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ } +#define STATS_DESC_COMMON(type, unit, base, exp) \ + .flags = type | unit | base | \ + BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ + BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ + BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ + .exponent = exp, \ + .size = 1 + +#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp), \ + .offset = offsetof(struct kvm_vm_stat, generic.stat) \ + }, \ + .name = #stat, \ + } +#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp), \ + .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ + }, \ + .name = #stat, \ + } +#define VM_STATS_DESC(stat, type, unit, base, exp) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp), \ + .offset = offsetof(struct kvm_vm_stat, stat) \ + }, \ + .name = #stat, \ + } +#define VCPU_STATS_DESC(stat, type, unit, base, exp) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp), \ + .offset = offsetof(struct kvm_vcpu_stat, stat) \ + }, \ + .name = #stat, \ + } +/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ +#define STATS_DESC(SCOPE, stat, type, unit, base, exp) \ + SCOPE##_STATS_DESC(stat, type, unit, base, exp) + +#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, unit, base, exponent) +#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, unit, base, exponent) +#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, unit, base, exponent) + +/* Cumulative counter, read/write */ +#define STATS_DESC_COUNTER(SCOPE, name) \ + STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \ + KVM_STATS_BASE_POW10, 0) +/* Instantaneous counter, read only */ +#define STATS_DESC_ICOUNTER(SCOPE, name) \ + STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \ + KVM_STATS_BASE_POW10, 0) +/* Peak counter, read/write */ +#define STATS_DESC_PCOUNTER(SCOPE, name) \ + STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ + KVM_STATS_BASE_POW10, 0) + +/* Cumulative time in nanosecond */ +#define STATS_DESC_TIME_NSEC(SCOPE, name) \ + STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ + KVM_STATS_BASE_POW10, -9) + +#define KVM_GENERIC_VM_STATS() \ + STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush) + +#define KVM_GENERIC_VCPU_STATS() \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \ + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \ + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns) -extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; +ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, + const struct _kvm_stats_desc *desc, + void *stats, size_t size_stats, + char __user *user_buffer, size_t size, loff_t *offset); +extern const struct kvm_stats_header kvm_vm_stats_header; +extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; +extern const struct 
kvm_stats_header kvm_vcpu_stats_header; +extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index a7580f69dd..ed6a985c56 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -76,5 +76,19 @@ struct kvm_mmu_memory_cache { }; #endif +struct kvm_vm_stat_generic { + u64 remote_tlb_flush; +}; + +struct kvm_vcpu_stat_generic { + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; +}; + +#define KVM_STATS_NAME_SIZE 48 #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 5f550eb27f..3fcd242367 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1397,25 +1397,28 @@ extern struct device_attribute *ata_common_sdev_attrs[]; ATA_SCSI_COMPAT_IOCTL \ .queuecommand = ata_scsi_queuecmd, \ .dma_need_drain = ata_scsi_dma_need_drain, \ - .can_queue = ATA_DEF_QUEUE, \ - .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ .this_id = ATA_SHT_THIS_ID, \ .emulated = ATA_SHT_EMULATED, \ .proc_name = drv_name, \ - .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ .bios_param = ata_std_bios_param, \ .unlock_native_capacity = ata_scsi_unlock_native_capacity -#define ATA_BASE_SHT(drv_name) \ +#define ATA_SUBBASE_SHT(drv_name) \ __ATA_BASE_SHT(drv_name), \ + .can_queue = ATA_DEF_QUEUE, \ + .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ + .slave_configure = ata_scsi_slave_config + +#define ATA_BASE_SHT(drv_name) \ + ATA_SUBBASE_SHT(drv_name), \ .sdev_attrs = ata_common_sdev_attrs #ifdef CONFIG_SATA_HOST extern struct device_attribute *ata_ncq_sdev_attrs[]; #define ATA_NCQ_SHT(drv_name) \ - __ATA_BASE_SHT(drv_name), \ + ATA_SUBBASE_SHT(drv_name), \ .sdev_attrs = ata_ncq_sdev_attrs, \ .change_queue_depth = ata_scsi_change_queue_depth #endif diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 89b69e645a..7074aa9af5 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -278,6 +278,7 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); } +void nvdimm_delete(struct nvdimm *nvdimm); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 9dcaa3e582..1b5fceb565 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -146,7 +146,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. - * @isolate: callback function that is resposible for deciding what to do with + * @isolate: callback function that is responsible for deciding what to do with * the item currently being scanned * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. @@ -172,7 +172,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru, * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. 
- * @isolate: callback function that is resposible for deciding what to do with + * @isolate: callback function that is responsible for deciding what to do with * the item currently being scanned * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. diff --git a/include/linux/litex.h b/include/linux/litex.h index 5ea9ccf5cc..f2edb86d5f 100644 --- a/include/linux/litex.h +++ b/include/linux/litex.h @@ -11,18 +11,6 @@ #include -/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */ -#if defined(CONFIG_LITEX_SUBREG_SIZE) && \ - (CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4) -#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE -#else -#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1! -#endif -#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8) - -/* LiteX subregisters of any width are always aligned on a 4-byte boundary */ -#define LITEX_SUBREG_ALIGN 0x4 - static inline void _write_litex_subregister(u32 val, void __iomem *addr) { writel((u32 __force)cpu_to_le32(val), addr); @@ -42,115 +30,54 @@ static inline u32 _read_litex_subregister(void __iomem *addr) * 32-bit wide logical CSR will be laid out as four 32-bit physical * subregisters, each one containing one byte of meaningful data. * + * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which + * means that only larger-than-32-bit CSRs will be split across multiple + * subregisters (e.g., a 64-bit CSR will be spread across two consecutive + * 32-bit subregisters). + * * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus */ -/* number of LiteX subregisters needed to store a register of given reg_size */ -#define _litex_num_subregs(reg_size) \ - (((reg_size) - 1) / LITEX_SUBREG_SIZE + 1) - -/* - * since the number of 4-byte aligned subregisters required to store a single - * LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the - * next adjacent LiteX CSR register w.r.t. the offset of the current one also - * depends on how many subregisters the latter is spread across - */ -#define _next_reg_off(off, size) \ - ((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN) - -/* - * The purpose of `_litex_[set|get]_reg()` is to implement the logic of - * writing to/reading from the LiteX CSR in a single place that can be then - * reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()` - * accessors for the appropriate data width. - * NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly - * discouraged, as they perform no error checking on the requested data width! - */ - -/** - * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register) - * @reg: Address of the CSR - * @reg_size: The width of the CSR expressed in the number of bytes - * @val: Value to be written to the CSR - * - * This function splits a single (possibly multi-byte) LiteX CSR write into - * a series of subregister writes with a proper offset. - * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)). 
- */ -static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val) -{ - u8 shift = _litex_num_subregs(reg_size) * LITEX_SUBREG_SIZE_BIT; - - while (shift > 0) { - shift -= LITEX_SUBREG_SIZE_BIT; - _write_litex_subregister(val >> shift, reg); - reg += LITEX_SUBREG_ALIGN; - } -} - -/** - * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register) - * @reg: Address of the CSR - * @reg_size: The width of the CSR expressed in the number of bytes - * - * Return: Value read from the CSR - * - * This function generates a series of subregister reads with a proper offset - * and joins their results into a single (possibly multi-byte) LiteX CSR value. - * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)). - */ -static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size) -{ - u64 r; - u8 i; - - r = _read_litex_subregister(reg); - for (i = 1; i < _litex_num_subregs(reg_size); i++) { - r <<= LITEX_SUBREG_SIZE_BIT; - reg += LITEX_SUBREG_ALIGN; - r |= _read_litex_subregister(reg); - } - return r; -} - static inline void litex_write8(void __iomem *reg, u8 val) { - _litex_set_reg(reg, sizeof(u8), val); + _write_litex_subregister(val, reg); } static inline void litex_write16(void __iomem *reg, u16 val) { - _litex_set_reg(reg, sizeof(u16), val); + _write_litex_subregister(val, reg); } static inline void litex_write32(void __iomem *reg, u32 val) { - _litex_set_reg(reg, sizeof(u32), val); + _write_litex_subregister(val, reg); } static inline void litex_write64(void __iomem *reg, u64 val) { - _litex_set_reg(reg, sizeof(u64), val); + _write_litex_subregister(val >> 32, reg); + _write_litex_subregister(val, reg + 4); } static inline u8 litex_read8(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u8)); + return _read_litex_subregister(reg); } static inline u16 litex_read16(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u16)); + return _read_litex_subregister(reg); } static inline u32 litex_read32(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u32)); + return _read_litex_subregister(reg); } static inline u64 litex_read64(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u64)); + return ((u64)_read_litex_subregister(reg) << 32) | + _read_litex_subregister(reg + 4); } #endif /* _LINUX_LITEX_H */ diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 7ab9f26431..a98309c012 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -109,11 +109,5 @@ int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *); int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *); int nlmsvc_decode_notify(struct svc_rqst *, __be32 *); int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *); -/* -int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); - */ #endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index e709fe5924..5ae766f26e 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -37,12 +37,7 @@ int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *); int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *); int nlm4svc_decode_notify(struct svc_rqst *, __be32 *); int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *); -/* -int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int 
nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); - */ + extern const struct rpc_version nlm_version4; #endif /* LOCKD_XDR4_H */ diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h new file mode 100644 index 0000000000..3fa65c9643 --- /dev/null +++ b/include/linux/logic_iomem.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: johannes@sipsolutions.net + */ +#ifndef __LOGIC_IOMEM_H +#define __LOGIC_IOMEM_H +#include +#include + +/** + * struct logic_iomem_ops - emulated IO memory ops + * @read: read an 8, 16, 32 or 64 bit quantity from the given offset, + * size is given in bytes (1, 2, 4 or 8) + * (64-bit only necessary if CONFIG_64BIT is set) + * @write: write an 8, 16, 32 or 64 bit quantity to the given offset, + * size is given in bytes (1, 2, 4 or 8) + * (64-bit only necessary if CONFIG_64BIT is set) + * @set: optional, for memset_io() + * @copy_from: optional, for memcpy_fromio() + * @copy_to: optional, for memcpy_toio() + * @unmap: optional, this region is getting unmapped + */ +struct logic_iomem_ops { + unsigned long (*read)(void *priv, unsigned int offset, int size); + void (*write)(void *priv, unsigned int offset, int size, + unsigned long val); + + void (*set)(void *priv, unsigned int offset, u8 value, int size); + void (*copy_from)(void *priv, void *buffer, unsigned int offset, + int size); + void (*copy_to)(void *priv, unsigned int offset, const void *buffer, + int size); + + void (*unmap)(void *priv); +}; + +/** + * struct logic_iomem_region_ops - ops for an IO memory handler + * @map: map a range in the registered IO memory region, must + * fill *ops with the ops and may fill *priv to be passed + * to the ops. The offset is given as the offset into the + * registered resource region. + * The return value is negative for errors, or >= 0 for + * success. On success, the return value is added to the + * offset for later ops, to allow for partial mappings. + */ +struct logic_iomem_region_ops { + long (*map)(unsigned long offset, size_t size, + const struct logic_iomem_ops **ops, + void **priv); +}; + +/** + * logic_iomem_add_region - register an IO memory region + * @resource: the resource description for this region + * @ops: the IO memory mapping ops for this resource + */ +int logic_iomem_add_region(struct resource *resource, + const struct logic_iomem_region_ops *ops); + +#endif /* __LOGIC_IOMEM_H */ diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h index 429d67d815..07add7882a 100644 --- a/include/linux/lru_cache.h +++ b/include/linux/lru_cache.h @@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there) Because of this later property, it is called "lru_cache". As it actually Tracks Objects in an Active SeT, we could also call it toast (incidentally that is what may happen to the data on the - backend storage uppon next resync, if we don't get it right). + backend storage upon next resync, if we don't get it right). What for?
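As an aside on the new logic_iomem interface above: a minimal sketch of a backend, assuming hypothetical names (my_read, my_write, my_map and the example resource range are invented for illustration and are not part of this patch):

static unsigned long my_read(void *priv, unsigned int offset, int size)
{
        /* toy backend: every register reads back as zero */
        return 0;
}

static void my_write(void *priv, unsigned int offset, int size,
                     unsigned long val)
{
        /* toy backend: writes are simply discarded */
}

static const struct logic_iomem_ops my_ops = {
        .read = my_read,
        .write = my_write,
};

static long my_map(unsigned long offset, size_t size,
                   const struct logic_iomem_ops **ops, void **priv)
{
        *ops = &my_ops;
        return 0;       /* no extra offset added for later ops */
}

static const struct logic_iomem_region_ops my_region_ops = {
        .map = my_map,
};

static int __init my_backend_init(void)
{
        /* arbitrary example range; a real user supplies its own resource */
        static struct resource res = {
                .start = 0x10000000,
                .end   = 0x10000fff,
                .flags = IORESOURCE_MEM,
        };

        return logic_iomem_add_region(&res, &my_region_ops);
}

Once the region is registered, an ioremap() of an address inside res goes through my_map(), and MMIO accessors such as readl()/writel() on the resulting mapping are dispatched to my_read()/my_write().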
@@ -152,7 +152,7 @@ struct lc_element { * for paranoia, and for "lc_element_to_index" */ unsigned lc_index; /* if we want to track a larger set of objects, - * it needs to become arch independend u64 */ + * it needs to become an architecture independent u64 */ unsigned lc_number; /* special label when on free list */ #define LC_FREE (~0U) @@ -263,7 +263,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char * * Allows (expects) the set to be "dirty". Note that the reference counts and * order on the active and lru lists may still change. Used to serialize - * changing transactions. Returns true if we aquired the lock. + * changing transactions. Returns true if we acquired the lock. */ static inline int lc_try_lock_for_transaction(struct lru_cache *lc) { @@ -275,7 +275,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc) * @lc: the lru cache to operate on * * Note that the reference counts and order on the active and lru lists may - * still change. Only works on a "clean" set. Returns true if we aquired the + * still change. Only works on a "clean" set. Returns true if we acquired the * lock, which means there are no pending changes, and any further attempt to * change the set will not succeed until the next lc_unlock(). */ diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index cd23355d22..17d02eda95 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -48,13 +48,13 @@ struct lsm_ioctlop_audit { }; struct lsm_ibpkey_audit { - u64 subnet_prefix; - u16 pkey; + u64 subnet_prefix; + u16 pkey; }; struct lsm_ibendport_audit { - char dev_name[IB_DEVICE_NAME_MAX]; - u8 port; + const char *dev_name; + u8 port; }; /* Auxiliary data to use in generating the audit record. */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 04c01794de..2adeea44c0 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -358,8 +358,7 @@ LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) -LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, - u8 dir) +LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid) LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index d5a983d65f..44365aab04 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -65,14 +65,10 @@ enum cmdq_code { CMDQ_CODE_LOGIC = 0xa0, }; -enum cmdq_cb_status { - CMDQ_CB_NORMAL = 0, - CMDQ_CB_ERROR -}; - struct cmdq_cb_data { - enum cmdq_cb_status sta; + int sta; void *data; + struct cmdq_pkt *pkt; }; typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data); diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index acee44b9db..0f06c2287b 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,14 +22,10 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 
0x01410f10 -/* PHY IDs and mask for Alaska 10G PHYs */ -#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88X3340 0x002b09a8 - /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 diff --git a/include/linux/math64.h b/include/linux/math64.h index 66deb1fdc2..2928f03d6d 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -3,6 +3,7 @@ #define _LINUX_MATH64_H #include +#include #include #include @@ -234,6 +235,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) #endif +#ifndef mul_s64_u64_shr +static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift) +{ + u64 ret; + + /* + * Extract the sign before the multiplication and put it back + * afterwards if needed. + */ + ret = mul_u64_u64_shr(abs(a), b, shift); + + if (a < 0) + ret = -((s64) ret); + + return ret; +} +#endif /* mul_s64_u64_shr */ + #ifndef mul_u64_u32_div static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) { diff --git a/include/linux/mcb.h b/include/linux/mcb.h index 71dd10a3d9..f6efb16f9d 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -120,7 +120,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv, __mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void mcb_unregister_driver(struct mcb_driver *driver); #define module_mcb_driver(__mcb_driver) \ - module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver); + module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver) extern void mcb_bus_add_devices(const struct mcb_bus *bus); extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev); extern struct mcb_bus *mcb_alloc_bus(struct device *carrier); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 1fb34ea394..3a38598c26 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -55,6 +55,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * register the device to mdev module. * * @owner: The module owner. + * @device_driver: Which device driver to probe() on newly created devices * @dev_attr_groups: Attributes of the parent device. * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. 
It is mandatory @@ -103,6 +104,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); **/ struct mdev_parent_ops { struct module *owner; + struct mdev_driver *device_driver; const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5984fff3f1..cbf46f56d1 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -30,7 +30,9 @@ extern unsigned long long max_possible_pfn; * @MEMBLOCK_NONE: no special request * @MEMBLOCK_HOTPLUG: hotpluggable region * @MEMBLOCK_MIRROR: mirrored region - * @MEMBLOCK_NOMAP: don't add to kernel direct mapping + * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as + * reserved in the memory map; refer to memblock_mark_nomap() description + * for further details */ enum memblock_flags { MEMBLOCK_NONE = 0x0, /* No special request */ @@ -50,7 +52,7 @@ struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int nid; #endif }; @@ -347,7 +349,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA static inline void memblock_set_region_node(struct memblock_region *r, int nid) { r->nid = nid; @@ -366,7 +368,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; } -#endif /* CONFIG_NEED_MULTIPLE_NODES */ +#endif /* CONFIG_NUMA */ /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c193be7607..bfe5c486f4 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -192,7 +192,7 @@ enum memcg_kmem_state { struct memcg_padding { char x[0]; } ____cacheline_internodealigned_in_smp; -#define MEMCG_PADDING(name) struct memcg_padding name; +#define MEMCG_PADDING(name) struct memcg_padding name #else #define MEMCG_PADDING(name) #endif @@ -349,8 +349,7 @@ struct mem_cgroup { struct deferred_split deferred_split_queue; #endif - struct mem_cgroup_per_node *nodeinfo[0]; - /* WARNING: nodeinfo must be the last member here */ + struct mem_cgroup_per_node *nodeinfo[]; }; /* @@ -743,35 +742,18 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, /** * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page * @page: the page - * @pgdat: pgdat of the page * * This function relies on page->mem_cgroup being stable. */ -static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, - struct pglist_data *pgdat) +static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) { + pg_data_t *pgdat = page_pgdat(page); struct mem_cgroup *memcg = page_memcg(page); VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page); return mem_cgroup_lruvec(memcg, pgdat); } -static inline bool lruvec_holds_page_lru_lock(struct page *page, - struct lruvec *lruvec) -{ - pg_data_t *pgdat = page_pgdat(page); - const struct mem_cgroup *memcg; - struct mem_cgroup_per_node *mz; - - if (mem_cgroup_disabled()) - return lruvec == &pgdat->__lruvec; - - mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - memcg = page_memcg(page) ? 
: root_mem_cgroup; - - return lruvec->pgdat == pgdat && mz->memcg == memcg; -} - struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); @@ -1221,18 +1203,11 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, return &pgdat->__lruvec; } -static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, - struct pglist_data *pgdat) -{ - return &pgdat->__lruvec; -} - -static inline bool lruvec_holds_page_lru_lock(struct page *page, - struct lruvec *lruvec) +static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) { pg_data_t *pgdat = page_pgdat(page); - return lruvec == &pgdat->__lruvec; + return &pgdat->__lruvec; } static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) @@ -1255,6 +1230,12 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return NULL; } +static inline +struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) +{ + return NULL; +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } @@ -1516,12 +1497,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, spin_unlock_irqrestore(&lruvec->lru_lock, flags); } +/* Test requires a stable page->memcg binding, see page_memcg() */ +static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec) +{ + return lruvec_pgdat(lruvec) == page_pgdat(page) && + lruvec_memcg(lruvec) == page_memcg(page); +} + /* Don't lock again iff page's lruvec locked */ static inline struct lruvec *relock_page_lruvec_irq(struct page *page, struct lruvec *locked_lruvec) { if (locked_lruvec) { - if (lruvec_holds_page_lru_lock(page, locked_lruvec)) + if (page_matches_lruvec(page, locked_lruvec)) return locked_lruvec; unlock_page_lruvec_irq(locked_lruvec); @@ -1535,7 +1523,7 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page, struct lruvec *locked_lruvec, unsigned long *flags) { if (locked_lruvec) { - if (lruvec_holds_page_lru_lock(page, locked_lruvec)) + if (page_matches_lruvec(page, locked_lruvec)) return locked_lruvec; unlock_page_lruvec_irqrestore(locked_lruvec, *flags); @@ -1631,6 +1619,7 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg, #endif #ifdef CONFIG_MEMCG_KMEM +bool mem_cgroup_kmem_disabled(void); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); @@ -1684,6 +1673,10 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) struct mem_cgroup *mem_cgroup_from_obj(void *p); #else +static inline bool mem_cgroup_kmem_disabled(void) +{ + return true; +} static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 28f32fd00f..a7fd2c3ccb 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -18,18 +18,6 @@ struct vmem_altmap; #ifdef CONFIG_MEMORY_HOTPLUG struct page *pfn_to_online_page(unsigned long pfn); -/* - * Types for free bootmem stored in page->lru.next. These have to be in - * some random range in unsigned long space for debugging purposes. - */ -enum { - MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, - SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, - MIX_SECTION_INFO, - NODE_INFO, - MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, -}; - /* Types for control the zone type of onlined and offlined memory */ enum { /* Offline the memory. 
 */ @@ -222,17 +210,6 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) #endif /* CONFIG_NUMA */ #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ -#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE -extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); -#else -static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) -{ -} -#endif -extern void put_page_bootmem(struct page *page); -extern void get_page_bootmem(unsigned long ingo, struct page *page, - unsigned long type); - void get_online_mems(void); void put_online_mems(void); @@ -260,10 +237,6 @@ static inline void zone_span_writelock(struct zone *zone) {} static inline void zone_span_writeunlock(struct zone *zone) {} static inline void zone_seqlock_init(struct zone *zone) {} -static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) -{ -} - static inline int try_online_node(int nid) { return 0; diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5f1c74df26..0aaf91b496 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -46,11 +46,8 @@ struct mempolicy { atomic_t refcnt; unsigned short mode; /* See MPOL_* above */ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ - union { - short preferred_node; /* preferred */ - nodemask_t nodes; /* interleave/bind */ - /* undefined for default */ - } v; + nodemask_t nodes; /* interleave/bind/prefer */ + union { nodemask_t cpuset_mems_allowed; /* relative to these nodes */ nodemask_t user_nodemask; /* nodemask passed by user */ @@ -150,7 +147,7 @@ extern int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); extern bool init_nodemask_of_mempolicy(nodemask_t *mask); -extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, +extern bool mempolicy_in_oom_domain(struct task_struct *tsk, const nodemask_t *mask); extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 45a79da89c..c0e9d35889 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,7 +26,7 @@ struct vmem_altmap { }; /* - * Specialize ZONE_DEVICE memory into multiple types each having differents + * Specialize ZONE_DEVICE memory into multiple types, each having a different * usage. * * MEMORY_DEVICE_PRIVATE: diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h index b061713221..af5d97239c 100644 --- a/include/linux/mfd/hi655x-pmic.h +++ b/include/linux/mfd/hi655x-pmic.h @@ -2,7 +2,7 @@ /* * Device driver for regulators in hi655x IC * - * Copyright (c) 2016 Hisilicon. + * Copyright (c) 2016 HiSilicon Ltd.
* * Authors: * Chen Feng diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index 5640e6088f..4c895072d9 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -222,31 +222,20 @@ enum lp87565_device_type { #define LP87565_GPIO2_SEL BIT(1) #define LP87565_GPIO1_SEL BIT(0) -#define LP87565_GOIO3_OD BIT(6) -#define LP87565_GOIO2_OD BIT(5) -#define LP87565_GOIO1_OD BIT(4) -#define LP87565_GOIO3_DIR BIT(2) -#define LP87565_GOIO2_DIR BIT(1) -#define LP87565_GOIO1_DIR BIT(0) +#define LP87565_GPIO3_OD BIT(6) +#define LP87565_GPIO2_OD BIT(5) +#define LP87565_GPIO1_OD BIT(4) +#define LP87565_GPIO3_DIR BIT(2) +#define LP87565_GPIO2_DIR BIT(1) +#define LP87565_GPIO1_DIR BIT(0) -#define LP87565_GOIO3_IN BIT(2) -#define LP87565_GOIO2_IN BIT(1) -#define LP87565_GOIO1_IN BIT(0) +#define LP87565_GPIO3_IN BIT(2) +#define LP87565_GPIO2_IN BIT(1) +#define LP87565_GPIO1_IN BIT(0) -#define LP87565_GOIO3_OUT BIT(2) -#define LP87565_GOIO2_OUT BIT(1) -#define LP87565_GOIO1_OUT BIT(0) - -enum LP87565_regulator_id { - /* BUCK's */ - LP87565_BUCK_0, - LP87565_BUCK_1, - LP87565_BUCK_2, - LP87565_BUCK_3, - LP87565_BUCK_10, - LP87565_BUCK_23, - LP87565_BUCK_3210, -}; +#define LP87565_GPIO3_OUT BIT(2) +#define LP87565_GPIO2_OUT BIT(1) +#define LP87565_GPIO1_OUT BIT(0) /** * struct LP87565 - state holder for the LP87565 driver @@ -263,5 +252,6 @@ struct lp87565 { u8 rev; u8 dev_type; struct regmap *regmap; + struct gpio_desc *reset_gpio; }; #endif /* __LINUX_MFD_LP87565_H */ diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index 601cbbc103..32e3470708 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -31,7 +31,7 @@ struct pinctrl_map; * @irq_flags: Mode for primary IRQ (defaults to active low) * @gpio_base: Base GPIO number * @gpio_configs: Array of GPIO configurations (See - * Documentation/driver-api/pinctl.rst) + * Documentation/driver-api/pin-control.rst) * @n_gpio_configs: Number of entries in gpio_configs * @gpsw: General purpose switch mode setting. Depends on the external * hardware connected to the switch. 
(See the SW1_MODE field diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h index c5a11b7458..68578e2019 100644 --- a/include/linux/mfd/mt6358/core.h +++ b/include/linux/mfd/mt6358/core.h @@ -6,12 +6,9 @@ #ifndef __MFD_MT6358_CORE_H__ #define __MFD_MT6358_CORE_H__ -#define MT6358_REG_WIDTH 16 - struct irq_top_t { int hwirq_base; unsigned int num_int_regs; - unsigned int num_int_bits; unsigned int en_reg; unsigned int en_reg_shift; unsigned int sta_reg; @@ -25,6 +22,7 @@ struct pmic_irq_data { unsigned short top_int_status_reg; bool *enable_hwirq; bool *cache_hwirq; + const struct irq_top_t *pmic_ints; }; enum mt6358_irq_top_status_shift { @@ -146,8 +144,8 @@ enum mt6358_irq_numbers { { \ .hwirq_base = MT6358_IRQ_##sp##_BASE, \ .num_int_regs = \ - ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \ - .num_int_bits = MT6358_IRQ_##sp##_BITS, \ + ((MT6358_IRQ_##sp##_BITS - 1) / \ + MTK_PMIC_REG_WIDTH) + 1, \ .en_reg = MT6358_##sp##_TOP_INT_CON0, \ .en_reg_shift = 0x6, \ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \ diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h index 2ad0b312aa..201139b121 100644 --- a/include/linux/mfd/mt6358/registers.h +++ b/include/linux/mfd/mt6358/registers.h @@ -8,6 +8,8 @@ /* PMIC Registers */ #define MT6358_SWCID 0xa +#define MT6358_TOPSTATUS 0x28 +#define MT6358_TOP_RST_MISC 0x14c #define MT6358_MISC_TOP_INT_CON0 0x188 #define MT6358_MISC_TOP_INT_STATUS0 0x194 #define MT6358_TOP_INT_STATUS0 0x19e diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h new file mode 100644 index 0000000000..8d29886812 --- /dev/null +++ b/include/linux/mfd/mt6359/core.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#ifndef __MFD_MT6359_CORE_H__ +#define __MFD_MT6359_CORE_H__ + +enum mt6359_irq_top_status_shift { + MT6359_BUCK_TOP = 0, + MT6359_LDO_TOP, + MT6359_PSC_TOP, + MT6359_SCK_TOP, + MT6359_BM_TOP, + MT6359_HK_TOP, + MT6359_AUD_TOP = 7, + MT6359_MISC_TOP, +}; + +enum mt6359_irq_numbers { + MT6359_IRQ_VCORE_OC = 1, + MT6359_IRQ_VGPU11_OC, + MT6359_IRQ_VGPU12_OC, + MT6359_IRQ_VMODEM_OC, + MT6359_IRQ_VPROC1_OC, + MT6359_IRQ_VPROC2_OC, + MT6359_IRQ_VS1_OC, + MT6359_IRQ_VS2_OC, + MT6359_IRQ_VPA_OC = 9, + MT6359_IRQ_VFE28_OC = 16, + MT6359_IRQ_VXO22_OC, + MT6359_IRQ_VRF18_OC, + MT6359_IRQ_VRF12_OC, + MT6359_IRQ_VEFUSE_OC, + MT6359_IRQ_VCN33_1_OC, + MT6359_IRQ_VCN33_2_OC, + MT6359_IRQ_VCN13_OC, + MT6359_IRQ_VCN18_OC, + MT6359_IRQ_VA09_OC, + MT6359_IRQ_VCAMIO_OC, + MT6359_IRQ_VA12_OC, + MT6359_IRQ_VAUX18_OC, + MT6359_IRQ_VAUD18_OC, + MT6359_IRQ_VIO18_OC, + MT6359_IRQ_VSRAM_PROC1_OC, + MT6359_IRQ_VSRAM_PROC2_OC, + MT6359_IRQ_VSRAM_OTHERS_OC, + MT6359_IRQ_VSRAM_MD_OC, + MT6359_IRQ_VEMC_OC, + MT6359_IRQ_VSIM1_OC, + MT6359_IRQ_VSIM2_OC, + MT6359_IRQ_VUSB_OC, + MT6359_IRQ_VRFCK_OC, + MT6359_IRQ_VBBCK_OC, + MT6359_IRQ_VBIF28_OC, + MT6359_IRQ_VIBR_OC, + MT6359_IRQ_VIO28_OC, + MT6359_IRQ_VM18_OC, + MT6359_IRQ_VUFS_OC = 45, + MT6359_IRQ_PWRKEY = 48, + MT6359_IRQ_HOMEKEY, + MT6359_IRQ_PWRKEY_R, + MT6359_IRQ_HOMEKEY_R, + MT6359_IRQ_NI_LBAT_INT, + MT6359_IRQ_CHRDET_EDGE = 53, + MT6359_IRQ_RTC = 64, + MT6359_IRQ_FG_BAT_H = 80, + MT6359_IRQ_FG_BAT_L, + MT6359_IRQ_FG_CUR_H, + MT6359_IRQ_FG_CUR_L, + MT6359_IRQ_FG_ZCV = 84, + MT6359_IRQ_FG_N_CHARGE_L = 87, + MT6359_IRQ_FG_IAVG_H, + MT6359_IRQ_FG_IAVG_L = 89, + MT6359_IRQ_FG_DISCHARGE = 91, + MT6359_IRQ_FG_CHARGE, + MT6359_IRQ_BATON_LV = 96, + MT6359_IRQ_BATON_BAT_IN = 98, + MT6359_IRQ_BATON_BAT_OU, + MT6359_IRQ_BIF = 100, + MT6359_IRQ_BAT_H = 112, + MT6359_IRQ_BAT_L, + MT6359_IRQ_BAT2_H, + MT6359_IRQ_BAT2_L, + MT6359_IRQ_BAT_TEMP_H, + MT6359_IRQ_BAT_TEMP_L, + MT6359_IRQ_THR_H, + MT6359_IRQ_THR_L, + MT6359_IRQ_AUXADC_IMP, + MT6359_IRQ_NAG_C_DLTV = 121, + MT6359_IRQ_AUDIO = 128, + MT6359_IRQ_ACCDET = 133, + MT6359_IRQ_ACCDET_EINT0, + MT6359_IRQ_ACCDET_EINT1, + MT6359_IRQ_SPI_CMD_ALERT = 144, + MT6359_IRQ_NR, +}; + +#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC +#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC +#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY +#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC +#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H +#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H +#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO +#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT + +#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1) +#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1) +#define MT6359_IRQ_PSC_BITS \ + (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1) +#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1) +#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1) +#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1) +#define MT6359_IRQ_AUD_BITS \ + (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1) +#define MT6359_IRQ_MISC_BITS \ + (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1) + +#define MT6359_TOP_GEN(sp) \ +{ \ + .hwirq_base = MT6359_IRQ_##sp##_BASE, \ + .num_int_regs = \ + ((MT6359_IRQ_##sp##_BITS - 1) / \ + MTK_PMIC_REG_WIDTH) + 1, \ + .en_reg = MT6359_##sp##_TOP_INT_CON0, \ + .en_reg_shift = 0x6, \ + .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \ + .sta_reg_shift = 0x2, \ + .top_offset = MT6359_##sp##_TOP, \ +} + +#endif /* 
__MFD_MT6359_CORE_H__ */ diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h new file mode 100644 index 0000000000..2135c96959 --- /dev/null +++ b/include/linux/mfd/mt6359/registers.h @@ -0,0 +1,529 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#ifndef __MFD_MT6359_REGISTERS_H__ +#define __MFD_MT6359_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6359_SWCID 0xa +#define MT6359_MISC_TOP_INT_CON0 0x188 +#define MT6359_MISC_TOP_INT_STATUS0 0x194 +#define MT6359_TOP_INT_STATUS0 0x19e +#define MT6359_SCK_TOP_INT_CON0 0x528 +#define MT6359_SCK_TOP_INT_STATUS0 0x534 +#define MT6359_EOSC_CALI_CON0 0x53a +#define MT6359_EOSC_CALI_CON1 0x53c +#define MT6359_RTC_MIX_CON0 0x53e +#define MT6359_RTC_MIX_CON1 0x540 +#define MT6359_RTC_MIX_CON2 0x542 +#define MT6359_RTC_DSN_ID 0x580 +#define MT6359_RTC_DSN_REV0 0x582 +#define MT6359_RTC_DBI 0x584 +#define MT6359_RTC_DXI 0x586 +#define MT6359_RTC_BBPU 0x588 +#define MT6359_RTC_IRQ_STA 0x58a +#define MT6359_RTC_IRQ_EN 0x58c +#define MT6359_RTC_CII_EN 0x58e +#define MT6359_RTC_AL_MASK 0x590 +#define MT6359_RTC_TC_SEC 0x592 +#define MT6359_RTC_TC_MIN 0x594 +#define MT6359_RTC_TC_HOU 0x596 +#define MT6359_RTC_TC_DOM 0x598 +#define MT6359_RTC_TC_DOW 0x59a +#define MT6359_RTC_TC_MTH 0x59c +#define MT6359_RTC_TC_YEA 0x59e +#define MT6359_RTC_AL_SEC 0x5a0 +#define MT6359_RTC_AL_MIN 0x5a2 +#define MT6359_RTC_AL_HOU 0x5a4 +#define MT6359_RTC_AL_DOM 0x5a6 +#define MT6359_RTC_AL_DOW 0x5a8 +#define MT6359_RTC_AL_MTH 0x5aa +#define MT6359_RTC_AL_YEA 0x5ac +#define MT6359_RTC_OSC32CON 0x5ae +#define MT6359_RTC_POWERKEY1 0x5b0 +#define MT6359_RTC_POWERKEY2 0x5b2 +#define MT6359_RTC_PDN1 0x5b4 +#define MT6359_RTC_PDN2 0x5b6 +#define MT6359_RTC_SPAR0 0x5b8 +#define MT6359_RTC_SPAR1 0x5ba +#define MT6359_RTC_PROT 0x5bc +#define MT6359_RTC_DIFF 0x5be +#define MT6359_RTC_CALI 0x5c0 +#define MT6359_RTC_WRTGR 0x5c2 +#define MT6359_RTC_CON 0x5c4 +#define MT6359_RTC_SEC_CTRL 0x5c6 +#define MT6359_RTC_INT_CNT 0x5c8 +#define MT6359_RTC_SEC_DAT0 0x5ca +#define MT6359_RTC_SEC_DAT1 0x5cc +#define MT6359_RTC_SEC_DAT2 0x5ce +#define MT6359_RTC_SEC_DSN_ID 0x600 +#define MT6359_RTC_SEC_DSN_REV0 0x602 +#define MT6359_RTC_SEC_DBI 0x604 +#define MT6359_RTC_SEC_DXI 0x606 +#define MT6359_RTC_TC_SEC_SEC 0x608 +#define MT6359_RTC_TC_MIN_SEC 0x60a +#define MT6359_RTC_TC_HOU_SEC 0x60c +#define MT6359_RTC_TC_DOM_SEC 0x60e +#define MT6359_RTC_TC_DOW_SEC 0x610 +#define MT6359_RTC_TC_MTH_SEC 0x612 +#define MT6359_RTC_TC_YEA_SEC 0x614 +#define MT6359_RTC_SEC_CK_PDN 0x616 +#define MT6359_RTC_SEC_WRTGR 0x618 +#define MT6359_PSC_TOP_INT_CON0 0x910 +#define MT6359_PSC_TOP_INT_STATUS0 0x91c +#define MT6359_BM_TOP_INT_CON0 0xc32 +#define MT6359_BM_TOP_INT_CON1 0xc38 +#define MT6359_BM_TOP_INT_STATUS0 0xc4a +#define MT6359_BM_TOP_INT_STATUS1 0xc4c +#define MT6359_HK_TOP_INT_CON0 0xf92 +#define MT6359_HK_TOP_INT_STATUS0 0xf9e +#define MT6359_BUCK_TOP_INT_CON0 0x1418 +#define MT6359_BUCK_TOP_INT_STATUS0 0x1424 +#define MT6359_BUCK_VPU_CON0 0x1488 +#define MT6359_BUCK_VPU_DBG0 0x14a6 +#define MT6359_BUCK_VPU_DBG1 0x14a8 +#define MT6359_BUCK_VPU_ELR0 0x14ac +#define MT6359_BUCK_VCORE_CON0 0x1508 +#define MT6359_BUCK_VCORE_DBG0 0x1526 +#define MT6359_BUCK_VCORE_DBG1 0x1528 +#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a +#define MT6359_BUCK_VCORE_ELR0 0x1534 +#define MT6359_BUCK_VGPU11_CON0 0x1588 +#define MT6359_BUCK_VGPU11_DBG0 0x15a6 +#define MT6359_BUCK_VGPU11_DBG1 0x15a8 +#define MT6359_BUCK_VGPU11_ELR0 0x15ac +#define 
MT6359_BUCK_VMODEM_CON0 0x1688 +#define MT6359_BUCK_VMODEM_DBG0 0x16a6 +#define MT6359_BUCK_VMODEM_DBG1 0x16a8 +#define MT6359_BUCK_VMODEM_ELR0 0x16ae +#define MT6359_BUCK_VPROC1_CON0 0x1708 +#define MT6359_BUCK_VPROC1_DBG0 0x1726 +#define MT6359_BUCK_VPROC1_DBG1 0x1728 +#define MT6359_BUCK_VPROC1_ELR0 0x172e +#define MT6359_BUCK_VPROC2_CON0 0x1788 +#define MT6359_BUCK_VPROC2_DBG0 0x17a6 +#define MT6359_BUCK_VPROC2_DBG1 0x17a8 +#define MT6359_BUCK_VPROC2_ELR0 0x17b2 +#define MT6359_BUCK_VS1_CON0 0x1808 +#define MT6359_BUCK_VS1_DBG0 0x1826 +#define MT6359_BUCK_VS1_DBG1 0x1828 +#define MT6359_BUCK_VS1_ELR0 0x1834 +#define MT6359_BUCK_VS2_CON0 0x1888 +#define MT6359_BUCK_VS2_DBG0 0x18a6 +#define MT6359_BUCK_VS2_DBG1 0x18a8 +#define MT6359_BUCK_VS2_ELR0 0x18b4 +#define MT6359_BUCK_VPA_CON0 0x1908 +#define MT6359_BUCK_VPA_CON1 0x190e +#define MT6359_BUCK_VPA_CFG0 0x1910 +#define MT6359_BUCK_VPA_CFG1 0x1912 +#define MT6359_BUCK_VPA_DBG0 0x1914 +#define MT6359_BUCK_VPA_DBG1 0x1916 +#define MT6359_VGPUVCORE_ANA_CON2 0x198e +#define MT6359_VGPUVCORE_ANA_CON13 0x19a4 +#define MT6359_VPROC1_ANA_CON3 0x19b2 +#define MT6359_VPROC2_ANA_CON3 0x1a0e +#define MT6359_VMODEM_ANA_CON3 0x1a1a +#define MT6359_VPU_ANA_CON3 0x1a26 +#define MT6359_VS1_ANA_CON0 0x1a2c +#define MT6359_VS2_ANA_CON0 0x1a34 +#define MT6359_VPA_ANA_CON0 0x1a3c +#define MT6359_LDO_TOP_INT_CON0 0x1b14 +#define MT6359_LDO_TOP_INT_CON1 0x1b1a +#define MT6359_LDO_TOP_INT_STATUS0 0x1b28 +#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a +#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40 +#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42 +#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44 +#define MT6359_LDO_VSRAM_MD_ELR 0x1b46 +#define MT6359_LDO_VFE28_CON0 0x1b88 +#define MT6359_LDO_VFE28_MON 0x1b8a +#define MT6359_LDO_VXO22_CON0 0x1b98 +#define MT6359_LDO_VXO22_MON 0x1b9a +#define MT6359_LDO_VRF18_CON0 0x1ba8 +#define MT6359_LDO_VRF18_MON 0x1baa +#define MT6359_LDO_VRF12_CON0 0x1bb8 +#define MT6359_LDO_VRF12_MON 0x1bba +#define MT6359_LDO_VEFUSE_CON0 0x1bc8 +#define MT6359_LDO_VEFUSE_MON 0x1bca +#define MT6359_LDO_VCN33_1_CON0 0x1bd8 +#define MT6359_LDO_VCN33_1_MON 0x1bda +#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8 +#define MT6359_LDO_VCN33_2_CON0 0x1c08 +#define MT6359_LDO_VCN33_2_MON 0x1c0a +#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18 +#define MT6359_LDO_VCN13_CON0 0x1c1a +#define MT6359_LDO_VCN13_MON 0x1c1c +#define MT6359_LDO_VCN18_CON0 0x1c2a +#define MT6359_LDO_VCN18_MON 0x1c2c +#define MT6359_LDO_VA09_CON0 0x1c3a +#define MT6359_LDO_VA09_MON 0x1c3c +#define MT6359_LDO_VCAMIO_CON0 0x1c4a +#define MT6359_LDO_VCAMIO_MON 0x1c4c +#define MT6359_LDO_VA12_CON0 0x1c5a +#define MT6359_LDO_VA12_MON 0x1c5c +#define MT6359_LDO_VAUX18_CON0 0x1c88 +#define MT6359_LDO_VAUX18_MON 0x1c8a +#define MT6359_LDO_VAUD18_CON0 0x1c98 +#define MT6359_LDO_VAUD18_MON 0x1c9a +#define MT6359_LDO_VIO18_CON0 0x1ca8 +#define MT6359_LDO_VIO18_MON 0x1caa +#define MT6359_LDO_VEMC_CON0 0x1cb8 +#define MT6359_LDO_VEMC_MON 0x1cba +#define MT6359_LDO_VSIM1_CON0 0x1cc8 +#define MT6359_LDO_VSIM1_MON 0x1cca +#define MT6359_LDO_VSIM2_CON0 0x1cd8 +#define MT6359_LDO_VSIM2_MON 0x1cda +#define MT6359_LDO_VUSB_CON0 0x1d08 +#define MT6359_LDO_VUSB_MON 0x1d0a +#define MT6359_LDO_VUSB_MULTI_SW 0x1d18 +#define MT6359_LDO_VRFCK_CON0 0x1d1a +#define MT6359_LDO_VRFCK_MON 0x1d1c +#define MT6359_LDO_VBBCK_CON0 0x1d2a +#define MT6359_LDO_VBBCK_MON 0x1d2c +#define MT6359_LDO_VBIF28_CON0 0x1d3a +#define MT6359_LDO_VBIF28_MON 0x1d3c +#define MT6359_LDO_VIBR_CON0 0x1d4a +#define MT6359_LDO_VIBR_MON 0x1d4c +#define 
MT6359_LDO_VIO28_CON0 0x1d5a +#define MT6359_LDO_VIO28_MON 0x1d5c +#define MT6359_LDO_VM18_CON0 0x1d88 +#define MT6359_LDO_VM18_MON 0x1d8a +#define MT6359_LDO_VUFS_CON0 0x1d98 +#define MT6359_LDO_VUFS_MON 0x1d9a +#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88 +#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a +#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e +#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6 +#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8 +#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac +#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08 +#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a +#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e +#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26 +#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c +#define MT6359_LDO_VSRAM_MD_MON 0x1f2e +#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32 +#define MT6359_VFE28_ANA_CON0 0x1f88 +#define MT6359_VAUX18_ANA_CON0 0x1f8c +#define MT6359_VUSB_ANA_CON0 0x1f90 +#define MT6359_VBIF28_ANA_CON0 0x1f94 +#define MT6359_VCN33_1_ANA_CON0 0x1f98 +#define MT6359_VCN33_2_ANA_CON0 0x1f9c +#define MT6359_VEMC_ANA_CON0 0x1fa0 +#define MT6359_VSIM1_ANA_CON0 0x1fa4 +#define MT6359_VSIM2_ANA_CON0 0x1fa8 +#define MT6359_VIO28_ANA_CON0 0x1fac +#define MT6359_VIBR_ANA_CON0 0x1fb0 +#define MT6359_VRF18_ANA_CON0 0x2008 +#define MT6359_VEFUSE_ANA_CON0 0x200c +#define MT6359_VCN18_ANA_CON0 0x2010 +#define MT6359_VCAMIO_ANA_CON0 0x2014 +#define MT6359_VAUD18_ANA_CON0 0x2018 +#define MT6359_VIO18_ANA_CON0 0x201c +#define MT6359_VM18_ANA_CON0 0x2020 +#define MT6359_VUFS_ANA_CON0 0x2024 +#define MT6359_VRF12_ANA_CON0 0x202a +#define MT6359_VCN13_ANA_CON0 0x202e +#define MT6359_VA09_ANA_CON0 0x2032 +#define MT6359_VA12_ANA_CON0 0x2036 +#define MT6359_VXO22_ANA_CON0 0x2088 +#define MT6359_VRFCK_ANA_CON0 0x208c +#define MT6359_VBBCK_ANA_CON0 0x2094 +#define MT6359_AUD_TOP_INT_CON0 0x2328 +#define MT6359_AUD_TOP_INT_STATUS0 0x2334 + +#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0 +#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0 +#define MT6359_RG_BUCK_VPU_LP_SHIFT 1 +#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0 +#define MT6359_DA_VPU_VOSEL_MASK 0x7F +#define MT6359_DA_VPU_VOSEL_SHIFT 0 +#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1 +#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0 +#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0 +#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0 +#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1 +#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0 +#define MT6359_DA_VCORE_VOSEL_MASK 0x7F +#define MT6359_DA_VCORE_VOSEL_SHIFT 0 +#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1 +#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0 +#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0 +#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4 +#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0 +#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0 +#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0 +#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1 +#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0 +#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F +#define MT6359_DA_VGPU11_VOSEL_SHIFT 0 +#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1 +#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0 +#define 
MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0 +#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0 +#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1 +#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0 +#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F +#define MT6359_DA_VMODEM_VOSEL_SHIFT 0 +#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1 +#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0 +#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0 +#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0 +#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1 +#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0 +#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F +#define MT6359_DA_VPROC1_VOSEL_SHIFT 0 +#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1 +#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0 +#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0 +#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0 +#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1 +#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0 +#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F +#define MT6359_DA_VPROC2_VOSEL_SHIFT 0 +#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1 +#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0 +#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0 +#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0 +#define MT6359_RG_BUCK_VS1_LP_SHIFT 1 +#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0 +#define MT6359_DA_VS1_VOSEL_MASK 0x7F +#define MT6359_DA_VS1_VOSEL_SHIFT 0 +#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1 +#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0 +#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0 +#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0 +#define MT6359_RG_BUCK_VS2_LP_SHIFT 1 +#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0 +#define MT6359_DA_VS2_VOSEL_MASK 0x7F +#define MT6359_DA_VS2_VOSEL_SHIFT 0 +#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1 +#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0 +#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0 +#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0 +#define MT6359_RG_BUCK_VPA_LP_SHIFT 1 +#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1 +#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F +#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0 +#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0 +#define MT6359_DA_VPA_VOSEL_MASK 0x3F +#define MT6359_DA_VPA_VOSEL_SHIFT 0 +#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1 +#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2 +#define MT6359_RG_VGPU11_FCCM_SHIFT 9 +#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13 +#define MT6359_RG_VCORE_FCCM_SHIFT 5 +#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3 +#define MT6359_RG_VPROC1_FCCM_SHIFT 1 +#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3 +#define MT6359_RG_VPROC2_FCCM_SHIFT 1 +#define MT6359_RG_VMODEM_FCCM_ADDR 
MT6359_VMODEM_ANA_CON3 +#define MT6359_RG_VMODEM_FCCM_SHIFT 1 +#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3 +#define MT6359_RG_VPU_FCCM_SHIFT 1 +#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0 +#define MT6359_RG_VS1_FPWM_SHIFT 3 +#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0 +#define MT6359_RG_VS2_FPWM_SHIFT 3 +#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0 +#define MT6359_RG_VPA_MODESET_SHIFT 1 +#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR +#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR +#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR +#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR +#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0 +#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON +#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0 +#define MT6359_RG_LDO_VXO22_EN_SHIFT 0 +#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON +#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0 +#define MT6359_RG_LDO_VRF18_EN_SHIFT 0 +#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON +#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0 +#define MT6359_RG_LDO_VRF12_EN_SHIFT 0 +#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON +#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0 +#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0 +#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON +#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0 +#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1 +#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0 +#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON +#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW +#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15 +#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0 +#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0 +#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON +#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW +#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1 +#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15 +#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0 +#define MT6359_RG_LDO_VCN13_EN_SHIFT 0 +#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON +#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0 +#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON +#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0 +#define MT6359_RG_LDO_VA09_EN_SHIFT 0 +#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON +#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0 +#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0 +#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON +#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0 +#define MT6359_RG_LDO_VA12_EN_SHIFT 0 +#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON +#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0 +#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON +#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0 +#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON +#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0 +#define 
MT6359_RG_LDO_VIO18_EN_SHIFT 0 +#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON +#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0 +#define MT6359_RG_LDO_VEMC_EN_SHIFT 0 +#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON +#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0 +#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0 +#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON +#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0 +#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0 +#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON +#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0 +#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1 +#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0 +#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON +#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW +#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1 +#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15 +#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0 +#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0 +#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON +#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0 +#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0 +#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON +#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0 +#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON +#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0 +#define MT6359_RG_LDO_VIBR_EN_SHIFT 0 +#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON +#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0 +#define MT6359_RG_LDO_VIO28_EN_SHIFT 0 +#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON +#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0 +#define MT6359_RG_LDO_VM18_EN_SHIFT 0 +#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON +#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0 +#define MT6359_RG_LDO_VUFS_EN_SHIFT 0 +#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON +#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0 +#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON +#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1 +#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8 +#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0 +#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON +#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1 +#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8 +#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0 +#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON +#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1 +#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8 +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1 +#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0 +#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON +#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1 +#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8 +#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0 +#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF +#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8 +#define 
MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0 +#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF +#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8 +#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0 +#define MT6359_RG_VEMC_VOSEL_MASK 0xF +#define MT6359_RG_VEMC_VOSEL_SHIFT 8 +#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0 +#define MT6359_RG_VSIM1_VOSEL_MASK 0xF +#define MT6359_RG_VSIM1_VOSEL_SHIFT 8 +#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0 +#define MT6359_RG_VSIM2_VOSEL_MASK 0xF +#define MT6359_RG_VSIM2_VOSEL_SHIFT 8 +#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0 +#define MT6359_RG_VIO28_VOSEL_MASK 0xF +#define MT6359_RG_VIO28_VOSEL_SHIFT 8 +#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0 +#define MT6359_RG_VIBR_VOSEL_MASK 0xF +#define MT6359_RG_VIBR_VOSEL_SHIFT 8 +#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0 +#define MT6359_RG_VRF18_VOSEL_MASK 0xF +#define MT6359_RG_VRF18_VOSEL_SHIFT 8 +#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0 +#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF +#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8 +#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0 +#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF +#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8 +#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0 +#define MT6359_RG_VIO18_VOSEL_MASK 0xF +#define MT6359_RG_VIO18_VOSEL_SHIFT 8 +#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0 +#define MT6359_RG_VM18_VOSEL_MASK 0xF +#define MT6359_RG_VM18_VOSEL_SHIFT 8 +#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0 +#define MT6359_RG_VUFS_VOSEL_MASK 0xF +#define MT6359_RG_VUFS_VOSEL_SHIFT 8 +#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0 +#define MT6359_RG_VRF12_VOSEL_MASK 0xF +#define MT6359_RG_VRF12_VOSEL_SHIFT 8 +#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0 +#define MT6359_RG_VCN13_VOSEL_MASK 0xF +#define MT6359_RG_VCN13_VOSEL_SHIFT 8 +#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0 +#define MT6359_RG_VA09_VOSEL_MASK 0xF +#define MT6359_RG_VA09_VOSEL_SHIFT 8 +#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0 +#define MT6359_RG_VA12_VOSEL_MASK 0xF +#define MT6359_RG_VA12_VOSEL_SHIFT 8 +#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0 +#define MT6359_RG_VXO22_VOSEL_MASK 0xF +#define MT6359_RG_VXO22_VOSEL_SHIFT 8 +#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0 +#define MT6359_RG_VRFCK_VOSEL_MASK 0xF +#define MT6359_RG_VRFCK_VOSEL_SHIFT 8 +#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0 +#define MT6359_RG_VBBCK_VOSEL_MASK 0xF +#define MT6359_RG_VBBCK_VOSEL_SHIFT 8 + +#endif /* __MFD_MT6359_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h new file mode 100644 index 0000000000..3d97c18851 --- /dev/null +++ b/include/linux/mfd/mt6359p/registers.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#ifndef __MFD_MT6359P_REGISTERS_H__ +#define __MFD_MT6359P_REGISTERS_H__ + +#define MT6359P_CHIP_VER 0x5930 + +/* PMIC Registers */ +#define MT6359P_HWCID 0x8 +#define MT6359P_TOP_TRAP 0x50 +#define MT6359P_TOP_TMA_KEY 0x3a8 +#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a +#define MT6359P_BUCK_VCORE_ELR0 0x152c +#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa +#define MT6359P_BUCK_VGPU11_ELR0 0x15b4 +#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44 +#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46 +#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48 +#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a +#define MT6359P_LDO_VEMC_ELR_0 0x1b4c +#define MT6359P_LDO_VFE28_CON0 0x1b88 +#define MT6359P_LDO_VFE28_MON 0x1b8c +#define MT6359P_LDO_VXO22_CON0 0x1b9a +#define MT6359P_LDO_VXO22_MON 0x1b9e +#define MT6359P_LDO_VRF18_CON0 0x1bac +#define MT6359P_LDO_VRF18_MON 0x1bb0 +#define MT6359P_LDO_VRF12_CON0 0x1bbe +#define MT6359P_LDO_VRF12_MON 0x1bc2 +#define MT6359P_LDO_VEFUSE_CON0 0x1bd0 +#define MT6359P_LDO_VEFUSE_MON 0x1bd4 +#define MT6359P_LDO_VCN33_1_CON0 0x1be2 +#define MT6359P_LDO_VCN33_1_MON 0x1be6 +#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4 +#define MT6359P_LDO_VCN33_2_CON0 0x1c08 +#define MT6359P_LDO_VCN33_2_MON 0x1c0c +#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a +#define MT6359P_LDO_VCN13_CON0 0x1c1c +#define MT6359P_LDO_VCN13_MON 0x1c20 +#define MT6359P_LDO_VCN18_CON0 0x1c2e +#define MT6359P_LDO_VCN18_MON 0x1c32 +#define MT6359P_LDO_VA09_CON0 0x1c40 +#define MT6359P_LDO_VA09_MON 0x1c44 +#define MT6359P_LDO_VCAMIO_CON0 0x1c52 +#define MT6359P_LDO_VCAMIO_MON 0x1c56 +#define MT6359P_LDO_VA12_CON0 0x1c64 +#define MT6359P_LDO_VA12_MON 0x1c68 +#define MT6359P_LDO_VAUX18_CON0 0x1c88 +#define MT6359P_LDO_VAUX18_MON 0x1c8c +#define MT6359P_LDO_VAUD18_CON0 0x1c9a +#define MT6359P_LDO_VAUD18_MON 0x1c9e +#define MT6359P_LDO_VIO18_CON0 0x1cac +#define MT6359P_LDO_VIO18_MON 0x1cb0 +#define MT6359P_LDO_VEMC_CON0 0x1cbe +#define MT6359P_LDO_VEMC_MON 0x1cc2 +#define MT6359P_LDO_VSIM1_CON0 0x1cd0 +#define MT6359P_LDO_VSIM1_MON 0x1cd4 +#define MT6359P_LDO_VSIM2_CON0 0x1ce2 +#define MT6359P_LDO_VSIM2_MON 0x1ce6 +#define MT6359P_LDO_VUSB_CON0 0x1d08 +#define MT6359P_LDO_VUSB_MON 0x1d0c +#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a +#define MT6359P_LDO_VRFCK_CON0 0x1d1c +#define MT6359P_LDO_VRFCK_MON 0x1d20 +#define MT6359P_LDO_VBBCK_CON0 0x1d2e +#define MT6359P_LDO_VBBCK_MON 0x1d32 +#define MT6359P_LDO_VBIF28_CON0 0x1d40 +#define MT6359P_LDO_VBIF28_MON 0x1d44 +#define MT6359P_LDO_VIBR_CON0 0x1d52 +#define MT6359P_LDO_VIBR_MON 0x1d56 +#define MT6359P_LDO_VIO28_CON0 0x1d64 +#define MT6359P_LDO_VIO28_MON 0x1d68 +#define MT6359P_LDO_VM18_CON0 0x1d88 +#define MT6359P_LDO_VM18_MON 0x1d8c +#define MT6359P_LDO_VUFS_CON0 0x1d9a +#define MT6359P_LDO_VUFS_MON 0x1d9e +#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88 +#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c +#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90 +#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8 +#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac +#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0 +#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08 +#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c +#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10 +#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28 +#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e +#define MT6359P_LDO_VSRAM_MD_MON 0x1f32 +#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36 +#define MT6359P_VFE28_ANA_CON0 0x1f88 +#define MT6359P_VAUX18_ANA_CON0 0x1f8c +#define MT6359P_VUSB_ANA_CON0 0x1f90 +#define MT6359P_VBIF28_ANA_CON0 0x1f94 +#define MT6359P_VCN33_1_ANA_CON0 0x1f98 +#define 
MT6359P_VCN33_2_ANA_CON0 0x1f9c +#define MT6359P_VEMC_ANA_CON0 0x1fa0 +#define MT6359P_VSIM1_ANA_CON0 0x1fa2 +#define MT6359P_VSIM2_ANA_CON0 0x1fa6 +#define MT6359P_VIO28_ANA_CON0 0x1faa +#define MT6359P_VIBR_ANA_CON0 0x1fae +#define MT6359P_VFE28_ELR_4 0x1fc0 +#define MT6359P_VRF18_ANA_CON0 0x2008 +#define MT6359P_VEFUSE_ANA_CON0 0x200c +#define MT6359P_VCN18_ANA_CON0 0x2010 +#define MT6359P_VCAMIO_ANA_CON0 0x2014 +#define MT6359P_VAUD18_ANA_CON0 0x2018 +#define MT6359P_VIO18_ANA_CON0 0x201c +#define MT6359P_VM18_ANA_CON0 0x2020 +#define MT6359P_VUFS_ANA_CON0 0x2024 +#define MT6359P_VRF12_ANA_CON0 0x202a +#define MT6359P_VCN13_ANA_CON0 0x202e +#define MT6359P_VA09_ANA_CON0 0x2032 +#define MT6359P_VRF18_ELR_3 0x204e +#define MT6359P_VXO22_ANA_CON0 0x2088 +#define MT6359P_VRFCK_ANA_CON0 0x208c +#define MT6359P_VBBCK_ANA_CON0 0x2096 + +#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0 +#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0 +#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0 +#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0 +#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F +#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4 +#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR +#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR +#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR +#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR +#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0 +#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF +#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0 +#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0 +#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON +#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0 +#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0 +#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON +#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0 +#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0 +#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON +#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0 +#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0 +#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON +#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0 +#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0 +#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON +#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0 +#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON +#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW +#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15 +#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0 +#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0 +#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON +#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW +#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0 +#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0 +#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON +#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0 +#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON +#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0 +#define MT6359P_RG_LDO_VA09_EN_SHIFT 0 +#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON +#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0 +#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0 +#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON 
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0 +#define MT6359P_RG_LDO_VA12_EN_SHIFT 0 +#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON +#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0 +#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON +#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0 +#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON +#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0 +#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0 +#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON +#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0 +#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0 +#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON +#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0 +#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0 +#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON +#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0 +#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0 +#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON +#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0 +#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON +#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW +#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0 +#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0 +#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON +#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0 +#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0 +#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON +#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0 +#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON +#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0 +#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0 +#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON +#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0 +#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0 +#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON +#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0 +#define MT6359P_RG_LDO_VM18_EN_SHIFT 0 +#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON +#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0 +#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0 +#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON +#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0 +#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON +#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1 +#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0 +#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON +#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1 +#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0 +#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON +#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1 +#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB +#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB +#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0 +#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON +#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1 +#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0 +#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0 +#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0 +#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0 
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0 +#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0 +#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0 +#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0 +#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0 +#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0 +#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0 +#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0 +#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0 +#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0 +#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0 +#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3 +#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4 +#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0 +#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0 +#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0 +#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF +#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4 +#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP +#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY + +#define TMA_KEY 0x9CA6 + +#endif /* __MFD_MT6359P_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 949268581b..56f210eebc 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -13,6 +13,7 @@ enum chip_id { MT6323_CHIP_ID = 0x23, MT6358_CHIP_ID = 0x58, + MT6359_CHIP_ID = 0x59, MT6391_CHIP_ID = 0x91, MT6397_CHIP_ID = 0x97, }; diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index c3748b53bf..068ae1c0f0 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ b/include/linux/mfd/mt6397/rtc.h @@ -36,6 +36,7 @@ #define RTC_AL_MASK_DOW BIT(4) #define RTC_TC_SEC 0x000a +#define RTC_TC_MTH_MASK 0x000f /* Min, Hour, Dom... 
register offset to RTC_TC_SEC */ #define RTC_OFFSET_SEC 0 #define RTC_OFFSET_MIN 1 diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index e07f6e61cd..a96e6d43ca 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -437,6 +437,87 @@ enum rk809_reg_id { #define RK817_RTC_COMP_LSB_REG 0x10 #define RK817_RTC_COMP_MSB_REG 0x11 +/* RK817 Codec Registers */ +#define RK817_CODEC_DTOP_VUCTL 0x12 +#define RK817_CODEC_DTOP_VUCTIME 0x13 +#define RK817_CODEC_DTOP_LPT_SRST 0x14 +#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15 +#define RK817_CODEC_AREF_RTCFG0 0x16 +#define RK817_CODEC_AREF_RTCFG1 0x17 +#define RK817_CODEC_AADC_CFG0 0x18 +#define RK817_CODEC_AADC_CFG1 0x19 +#define RK817_CODEC_DADC_VOLL 0x1a +#define RK817_CODEC_DADC_VOLR 0x1b +#define RK817_CODEC_DADC_SR_ACL0 0x1e +#define RK817_CODEC_DADC_ALC1 0x1f +#define RK817_CODEC_DADC_ALC2 0x20 +#define RK817_CODEC_DADC_NG 0x21 +#define RK817_CODEC_DADC_HPF 0x22 +#define RK817_CODEC_DADC_RVOLL 0x23 +#define RK817_CODEC_DADC_RVOLR 0x24 +#define RK817_CODEC_AMIC_CFG0 0x27 +#define RK817_CODEC_AMIC_CFG1 0x28 +#define RK817_CODEC_DMIC_PGA_GAIN 0x29 +#define RK817_CODEC_DMIC_LMT1 0x2a +#define RK817_CODEC_DMIC_LMT2 0x2b +#define RK817_CODEC_DMIC_NG1 0x2c +#define RK817_CODEC_DMIC_NG2 0x2d +#define RK817_CODEC_ADAC_CFG0 0x2e +#define RK817_CODEC_ADAC_CFG1 0x2f +#define RK817_CODEC_DDAC_POPD_DACST 0x30 +#define RK817_CODEC_DDAC_VOLL 0x31 +#define RK817_CODEC_DDAC_VOLR 0x32 +#define RK817_CODEC_DDAC_SR_LMT0 0x35 +#define RK817_CODEC_DDAC_LMT1 0x36 +#define RK817_CODEC_DDAC_LMT2 0x37 +#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38 +#define RK817_CODEC_DDAC_RVOLL 0x39 +#define RK817_CODEC_DDAC_RVOLR 0x3a +#define RK817_CODEC_AHP_ANTI0 0x3b +#define RK817_CODEC_AHP_ANTI1 0x3c +#define RK817_CODEC_AHP_CFG0 0x3d +#define RK817_CODEC_AHP_CFG1 0x3e +#define RK817_CODEC_AHP_CP 0x3f +#define RK817_CODEC_ACLASSD_CFG1 0x40 +#define RK817_CODEC_ACLASSD_CFG2 0x41 +#define RK817_CODEC_APLL_CFG0 0x42 +#define RK817_CODEC_APLL_CFG1 0x43 +#define RK817_CODEC_APLL_CFG2 0x44 +#define RK817_CODEC_APLL_CFG3 0x45 +#define RK817_CODEC_APLL_CFG4 0x46 +#define RK817_CODEC_APLL_CFG5 0x47 +#define RK817_CODEC_DI2S_CKM 0x48 +#define RK817_CODEC_DI2S_RSD 0x49 +#define RK817_CODEC_DI2S_RXCR1 0x4a +#define RK817_CODEC_DI2S_RXCR2 0x4b +#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c +#define RK817_CODEC_DI2S_TXCR1 0x4d +#define RK817_CODEC_DI2S_TXCR2 0x4e +#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f + +/* RK817_CODEC_DI2S_CKM */ +#define RK817_I2S_MODE_MASK (0x1 << 0) +#define RK817_I2S_MODE_MST (0x1 << 0) +#define RK817_I2S_MODE_SLV (0x0 << 0) + +/* RK817_CODEC_DDAC_MUTE_MIXCTL */ +#define DACMT_MASK (0x1 << 0) +#define DACMT_ENABLE (0x1 << 0) +#define DACMT_DISABLE (0x0 << 0) + +/* RK817_CODEC_DI2S_RXCR2 */ +#define VDW_RX_24BITS (0x17) +#define VDW_RX_16BITS (0x0f) + +/* RK817_CODEC_DI2S_TXCR2 */ +#define VDW_TX_24BITS (0x17) +#define VDW_TX_16BITS (0x0f) + +/* RK817_CODEC_AMIC_CFG0 */ +#define MIC_DIFF_MASK (0x1 << 7) +#define MIC_DIFF_DIS (0x0 << 7) +#define MIC_DIFF_EN (0x1 << 7) + #define RK817_POWER_EN_REG(i) (0xb1 + (i)) #define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i)) diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index f1631a39ac..f92fe09047 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -67,11 +67,8 @@ struct sec_pmic_dev { struct i2c_client *i2c; unsigned long device_type; - int irq_base; int irq; struct regmap_irq_chip_data *irq_data; - - bool wakeup; }; int sec_irq_init(struct 
sec_pmic_dev *sec_pmic); @@ -81,15 +78,8 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic); struct sec_platform_data { struct sec_regulator_data *regulators; struct sec_opmode_data *opmode; - int device_type; int num_regulators; - int irq_base; - int (*cfg_pmic_irq)(void); - - bool wakeup; - bool buck_voltage_lock; - int buck_gpios[3]; int buck_ds[3]; unsigned int buck2_voltage[8]; @@ -99,35 +89,12 @@ struct sec_platform_data { unsigned int buck4_voltage[8]; bool buck4_gpiodvs; - int buck_set1; - int buck_set2; - int buck_set3; - int buck2_enable; - int buck3_enable; - int buck4_enable; int buck_default_idx; - int buck2_default_idx; - int buck3_default_idx; - int buck4_default_idx; - int buck_ramp_delay; - int buck2_ramp_delay; - int buck34_ramp_delay; - int buck5_ramp_delay; - int buck16_ramp_delay; - int buck7810_ramp_delay; - int buck9_ramp_delay; - int buck24_ramp_delay; - int buck3_ramp_delay; - int buck7_ramp_delay; - int buck8910_ramp_delay; - - bool buck1_ramp_enable; bool buck2_ramp_enable; bool buck3_ramp_enable; bool buck4_ramp_enable; - bool buck6_ramp_enable; int buck2_init; int buck3_init; diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h index bb8d2e2766..76a943c83c 100644 --- a/include/linux/mfd/wcd934x/registers.h +++ b/include/linux/mfd/wcd934x/registers.h @@ -18,6 +18,8 @@ #define WCD934X_EFUSE_SENSE_STATE_DEF 0x10 #define WCD934X_EFUSE_SENSE_EN_MASK BIT(0) #define WCD934X_EFUSE_SENSE_ENABLE BIT(0) +#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a +#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b #define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037 #define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038 #define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039 @@ -103,21 +105,58 @@ #define WCD934X_ANA_AMIC3 0x0610 #define WCD934X_ANA_AMIC4 0x0611 #define WCD934X_ANA_MBHC_MECH 0x0614 +#define WCD934X_MBHC_L_DET_EN_MASK BIT(7) +#define WCD934X_MBHC_L_DET_EN BIT(7) +#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6) +#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5) +#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1 +#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4) +#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1 +#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3) +#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1 +#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2) +#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1) +#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0) #define WCD934X_ANA_MBHC_ELECT 0x0615 +#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0) +#define WCD934X_ANA_MBHC_BIAS_EN BIT(0) #define WCD934X_ANA_MBHC_ZDET 0x0616 #define WCD934X_ANA_MBHC_RESULT_1 0x0617 #define WCD934X_ANA_MBHC_RESULT_2 0x0618 #define WCD934X_ANA_MBHC_RESULT_3 0x0619 +#define WCD934X_ANA_MBHC_BTN0 0x061a +#define WCD934X_VTH_MASK GENMASK(7, 2) +#define WCD934X_ANA_MBHC_BTN1 0x061b +#define WCD934X_ANA_MBHC_BTN2 0x061c +#define WCD934X_ANA_MBHC_BTN3 0x061d +#define WCD934X_ANA_MBHC_BTN4 0x061e +#define WCD934X_ANA_MBHC_BTN5 0x061f +#define WCD934X_ANA_MBHC_BTN6 0x0620 +#define WCD934X_ANA_MBHC_BTN7 0x0621 +#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2) #define WCD934X_ANA_MICB1 0x0622 #define WCD934X_MICB_VAL_MASK GENMASK(5, 0) #define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6) +#define WCD934X_MICB_DISABLE 0 +#define WCD934X_MICB_ENABLE 1 +#define WCD934X_MICB_PULL_UP 2 +#define WCD934X_MICB_PULL_DOWN 3 #define WCD934X_ANA_MICB_PULL_UP 0x80 #define WCD934X_ANA_MICB_ENABLE 0x40 #define WCD934X_ANA_MICB_DISABLE 0x0 #define WCD934X_ANA_MICB2 0x0623 +#define WCD934X_ANA_MICB2_ENABLE BIT(6) +#define 
WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6) +#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0) +#define WCD934X_ANA_MICB2_RAMP 0x0624 +#define WCD934X_RAMP_EN_MASK BIT(7) +#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2) #define WCD934X_ANA_MICB3 0x0625 #define WCD934X_ANA_MICB4 0x0626 #define WCD934X_BIAS_VBG_FINE_ADJ 0x0629 +#define WCD934X_MBHC_CTL_CLK 0x0656 +#define WCD934X_MBHC_CTL_BCS 0x065a +#define WCD934X_MBHC_STATUS_SPARE_1 0x065b #define WCD934X_MICB1_TEST_CTL_1 0x066b #define WCD934X_MICB1_TEST_CTL_2 0x066c #define WCD934X_MICB2_TEST_CTL_1 0x066e @@ -141,7 +180,11 @@ #define WCD934X_HPH_CNP_WG_CTL 0x06cc #define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7) #define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7) +#define WCD934X_HPH_CNP_WG_TIME 0x06cd #define WCD934X_HPH_OCP_CTL 0x06ce +#define WCD934X_HPH_PA_CTL2 0x06d2 +#define WCD934X_HPHPA_GND_R_MASK BIT(6) +#define WCD934X_HPHPA_GND_L_MASK BIT(4) #define WCD934X_HPH_L_EN 0x06d3 #define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5) #define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0 @@ -152,6 +195,8 @@ #define WCD934X_HPH_OCP_DET_MASK BIT(0) #define WCD934X_HPH_OCP_DET_ENABLE BIT(0) #define WCD934X_HPH_OCP_DET_DISABLE 0 +#define WCD934X_HPH_R_ATEST 0x06d8 +#define WCD934X_HPHPA_GND_OVR_MASK BIT(1) #define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea #define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb #define WCD934X_CLK_SYS_MCLK_PRG 0x0711 @@ -172,7 +217,19 @@ #define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e #define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0) #define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0) +#define WCD934X_MBHC_NEW_CTL_1 0x0720 +#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7) +#define WCD934X_MBHC_CTL_RCO_EN BIT(7) #define WCD934X_MBHC_NEW_CTL_2 0x0721 +#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2) +#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722 +#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6) +#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723 +#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0) +#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4) +#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724 +#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725 +#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726 #define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727 #define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733 #define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735 diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 416ee6dd25..3d43c60b49 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -39,10 +39,26 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 +#define MICREL_KSZ8_P1_ERRATA 0x00000003 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC #define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104 #define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105 +/* Device specific MII_BMCR (Reg 0) bits */ +/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */ +#define KSZ886X_BMCR_HP_MDIX BIT(5) +/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation + * (transmit on TXP/TXM pins) + */ +#define KSZ886X_BMCR_FORCE_MDI BIT(4) +/* 1 = Disable auto MDI-X */ +#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3) +#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2) +#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1) +#define KSZ886X_BMCR_DISABLE_LED BIT(0) + +#define KSZ886X_CTRL_MDIX_STAT BIT(4) + #endif /* _MICREL_PHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 4bb4e519e3..23dadf7aeb 100644 --- a/include/linux/migrate.h +++
b/include/linux/migrate.h @@ -76,7 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, { return -ENOSYS; } - #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION @@ -95,14 +94,9 @@ static inline void __ClearPageMovable(struct page *page) #endif #ifdef CONFIG_NUMA_BALANCING -extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); #else -static inline bool pmd_trans_migrating(pmd_t pmd) -{ - return false; -} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { @@ -110,24 +104,6 @@ static inline int migrate_misplaced_page(struct page *page, } #endif /* CONFIG_NUMA_BALANCING */ -#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) -extern int migrate_misplaced_transhuge_page(struct mm_struct *mm, - struct vm_area_struct *vma, - pmd_t *pmd, pmd_t entry, - unsigned long address, - struct page *page, int node); -#else -static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, - struct vm_area_struct *vma, - pmd_t *pmd, pmd_t entry, - unsigned long address, - struct page *page, int node) -{ - return -EAGAIN; -} -#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/ - - #ifdef CONFIG_MIGRATION /* diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 578c4ccae9..0025913505 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1179,6 +1179,7 @@ enum mlx5_cap_type { MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, + MLX5_CAP_GENERAL_2 = 0x20, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1220,6 +1221,15 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_GEN_MAX(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) +#define MLX5_CAP_GEN_2(mdev, cap) \ + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) + +#define MLX5_CAP_GEN_2_64(mdev, cap) \ + MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) + +#define MLX5_CAP_GEN_2_MAX(mdev, cap) \ + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap) + #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f8902bcd91..1efe374669 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -554,6 +554,7 @@ struct mlx5_adev { int idx; }; +struct mlx5_ft_pool; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ struct mlx5_irq_table *irq_table; @@ -606,6 +607,7 @@ struct mlx5_priv { struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; + struct mlx5_ft_pool *ft_pool; struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h index e49d8c0d4f..cea6ecb4b7 100644 --- a/include/linux/mlx5/eq.h +++ b/include/linux/mlx5/eq.h @@ -16,6 +16,7 @@ struct mlx5_eq_param { u8 irq_index; int nent; u64 mask[4]; + cpumask_var_t affinity; }; struct mlx5_eq * diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 17109b65c1..bc7db2e059 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -98,10 +98,11 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, u16 vport_num); /* Reg C1 usage: - * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) > + * Reg C1 
= < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) > * - * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is - * encapsulation tunnel options, and the lowest 8 bits are used for zone id. + * Highest bit is reserved for other offloads as marker bit, next 12 bits of reg c1 + * are the encapsulation tunnel id, next 11 bits are encapsulation tunnel options, + * and the lowest 8 bits are used for zone id. * * Zone id is used to restore CT flow when packet misses on chain. * @@ -109,16 +110,18 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, * on miss and to support inner header rewrite by means of implicit chain 0 * flows. */ +#define ESW_RESERVED_BITS 1 #define ESW_ZONE_ID_BITS 8 -#define ESW_TUN_OPTS_BITS 12 +#define ESW_TUN_OPTS_BITS 11 #define ESW_TUN_ID_BITS 12 #define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS #define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET #define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0) -#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET) -#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET) +#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET) +#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET) #define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */ -#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */ +/* 0x7FF is a reserved mapping */ +#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0) #define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT) #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 1f51f4c3b1..77746f7e35 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -87,6 +87,8 @@ enum { FDB_BYPASS_PATH, FDB_TC_OFFLOAD, FDB_FT_OFFLOAD, + FDB_TC_MISS, + FDB_BR_OFFLOAD, FDB_SLOW_PATH, FDB_PER_VPORT, }; @@ -254,10 +256,16 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); +struct mlx5_pkt_reformat_params { + int type; + u8 param_0; + u8 param_1; + size_t size; + void *data; +}; + struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, - int reformat_type, - size_t size, - void *reformat_data, + struct mlx5_pkt_reformat_params *params, enum mlx5_flow_namespace_type ns_type); void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *reformat); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index eb86e80e46..b0009aa364 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -435,7 +435,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_40[0x20]; - u8 reserved_at_60[0x18]; + u8 reserved_at_60[0x2]; + u8 reformat_insert[0x1]; + u8 reformat_remove[0x1]; + u8 reserved_at_64[0x14]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; @@ -953,9 +956,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { }; enum { - MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, + MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, + MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, + MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, }; struct
mlx5_ifc_roce_cap_bits { @@ -1296,23 +1299,12 @@ enum { MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, }; -enum { - MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, -}; - -enum { - MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, -}; - struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x1f]; u8 vhca_resource_manager[0x1]; - u8 reserved_at_20[0x3]; + u8 hca_cap_2[0x1]; + u8 reserved_at_21[0x2]; u8 event_on_vhca_state_teardown_request[0x1]; u8 event_on_vhca_state_in_use[0x1]; u8 event_on_vhca_state_active[0x1]; @@ -1520,7 +1512,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; - u8 reserved_at_250[0x8]; + u8 reserved_at_248[0x2]; + u8 umem_uid_0[0x1]; + u8 reserved_at_250[0x5]; u8 log_pg_sz[0x8]; u8 bf[0x1]; @@ -1732,6 +1726,17 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_7c0[0x40]; }; +struct mlx5_ifc_cmd_hca_cap_2_bits { + u8 reserved_at_0[0xa0]; + + u8 max_reformat_insert_size[0x8]; + u8 max_reformat_insert_offset[0x8]; + u8 max_reformat_remove_size[0x8]; + u8 max_reformat_remove_offset[0x8]; + + u8 reserved_at_c0[0x740]; +}; + enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, @@ -2944,9 +2949,9 @@ enum { }; enum { - MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, + MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1, + MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2, }; struct mlx5_ifc_qpc_bits { @@ -3105,6 +3110,7 @@ struct mlx5_ifc_roce_addr_layout_bits { union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; + struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2; struct mlx5_ifc_odp_cap_bits odp_cap; struct mlx5_ifc_atomic_caps_bits atomic_caps; struct mlx5_ifc_roce_cap_bits roce_cap; @@ -3396,12 +3402,6 @@ enum { MLX5_SQC_STATE_ERR = 0x3, }; -enum { - MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, -}; - struct mlx5_ifc_sqc_bits { u8 rlky[0x1]; u8 cd_master[0x1]; @@ -3507,12 +3507,6 @@ enum { MLX5_RQC_STATE_ERR = 0x3, }; -enum { - MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, -}; - struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; u8 delay_drop_en[0x1]; @@ -3790,8 +3784,8 @@ struct mlx5_ifc_eqc_bits { u8 reserved_at_80[0x20]; - u8 reserved_at_a0[0x18]; - u8 intr[0x8]; + u8 reserved_at_a0[0x14]; + u8 intr[0xc]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; @@ -5785,12 +5779,14 @@ struct mlx5_ifc_query_eq_in_bits { }; struct mlx5_ifc_packet_reformat_context_in_bits { - u8 reserved_at_0[0x5]; - u8 reformat_type[0x3]; - u8 reserved_at_8[0xe]; + u8 reformat_type[0x8]; + u8 reserved_at_8[0x4]; + u8 reformat_param_0[0x4]; + u8 reserved_at_10[0x6]; u8 reformat_data_size[0xa]; - u8 reserved_at_20[0x10]; + u8 reformat_param_1[0x8]; + u8 reserved_at_28[0x8]; u8 reformat_data[2][0x8]; u8 more_reformat_data[][0x8]; @@ -5830,12 +5826,20 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits { u8 reserved_at_60[0x20]; }; +enum { + MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1, + MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7, + MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 
0x9, +}; + enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3, MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, + MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf, + MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10, }; struct mlx5_ifc_alloc_packet_reformat_context_in_bits { @@ -5956,6 +5960,8 @@ enum { MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, + MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F, + MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -11055,6 +11061,11 @@ struct mlx5_ifc_create_sampler_obj_in_bits { struct mlx5_ifc_sampler_obj_bits sampler_object; }; +struct mlx5_ifc_query_sampler_obj_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + struct mlx5_ifc_sampler_obj_bits sampler_object; +}; + enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b7deb790f2..61e48d459b 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -550,8 +550,8 @@ static inline const char *mlx5_qp_state_str(int state) static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) { return !MLX5_CAP_ROCE(dev, qp_ts_format) ? - MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : - MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_TIMESTAMP_FORMAT_DEFAULT; } #endif /* MLX5_QP_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 8ae31622de..7ca22e6e69 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -46,7 +46,7 @@ extern int sysctl_page_lock_unfairness; void init_mm_internals(void); -#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ +#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ extern unsigned long max_mapnr; static inline void set_max_mapnr(unsigned long limit) @@ -124,16 +124,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #define lm_alias(x) __va(__pa_symbol(x)) #endif -/* - * With CONFIG_CFI_CLANG, the compiler replaces function addresses in - * instrumented C code with jump table addresses. Architectures that - * support CFI can define this macro to return the actual function address - * when needed. - */ -#ifndef function_nocfi -#define function_nocfi(x) (x) -#endif - /* * To prevent common memory management code establishing * a zero page mapping on a read fault. @@ -155,7 +145,7 @@ extern int mmap_rnd_compat_bits __read_mostly; /* This function must be updated when the size of struct page grows above 80 * or reduces below 56. The idea that compiler optimizes out switch() * statement, and only leaves move/store instructions. Also the compiler can - * combine write statments if they are both assignments and can be reordered, + * combine write statements if they are both assignments and can be reordered, * this can result in several of the writes here being dropped. 
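For context, the helper guarded by this comment has roughly the following shape (a reconstruction for illustration, not part of this patch set):

	static inline void __mm_zero_struct_page(struct page *page)
	{
		unsigned long *_pp = (void *)page;

		/* struct page must be 56, 64, 72 or 80 bytes, 8-byte aligned */
		BUILD_BUG_ON(sizeof(struct page) & 7);
		BUILD_BUG_ON(sizeof(struct page) < 56);
		BUILD_BUG_ON(sizeof(struct page) > 80);

		switch (sizeof(struct page)) {
		case 80:
			_pp[9] = 0;
			fallthrough;
		case 72:
			_pp[8] = 0;
			fallthrough;
		case 64:
			_pp[7] = 0;
			fallthrough;
		case 56:
			_pp[0] = 0;
			_pp[1] = 0;
			_pp[2] = 0;
			_pp[3] = 0;
			_pp[4] = 0;
			_pp[5] = 0;
			_pp[6] = 0;
			break;
		}
	}

For any fixed sizeof(struct page) the compiler collapses the switch into straight-line stores, and adjacent assignments may be merged, which is exactly the behavior the comment warns about.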
*/ #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) @@ -234,7 +224,11 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp, void **shadowp); +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +#else +#define nth_page(page,n) ((page) + (n)) +#endif /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) @@ -244,6 +238,9 @@ int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) +void setup_initial_init_mm(void *start_code, void *end_code, + void *end_data, void *brk); + /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way @@ -546,7 +543,12 @@ struct vm_fault { pud_t *pud; /* Pointer to pud entry matching * the 'address' */ - pte_t orig_pte; /* Value of PTE at the time of fault */ + union { + pte_t orig_pte; /* Value of PTE at the time of fault */ + pmd_t orig_pmd; /* Value of PMD at the time of fault, + * used by PMD fault only. + */ + }; struct page *cow_page; /* Page handler may use for COW fault */ struct page *page; /* ->fault handlers should return a @@ -904,6 +906,7 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); +void copy_huge_page(struct page *dst, struct page *src); /* * Compound pages have a destructor function. Provide a @@ -1341,7 +1344,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, if (!is_cow_mapping(vma->vm_flags)) return false; - if (!atomic_read(&vma->vm_mm->has_pinned)) + if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) return false; return page_maybe_dma_pinned(page); @@ -1668,10 +1671,11 @@ struct address_space *page_mapping(struct page *page); static inline bool page_is_pfmemalloc(const struct page *page) { /* - * Page index cannot be this large so this must be - * a pfmemalloc page. + * lru.next has bit 1 set if the page is allocated from the + * pfmemalloc reserves. Callers may simply overwrite it if + * they do not need to preserve that information. */ - return page->index == -1UL; + return (uintptr_t)page->lru.next & BIT(1); } /* @@ -1680,12 +1684,12 @@ static inline bool page_is_pfmemalloc(const struct page *page) */ static inline void set_page_pfmemalloc(struct page *page) { - page->index = -1UL; + page->lru.next = (void *)BIT(1); } static inline void clear_page_pfmemalloc(struct page *page) { - page->index = 0; + page->lru.next = NULL; } /* @@ -1709,8 +1713,8 @@ extern bool can_do_mlock(void); #else static inline bool can_do_mlock(void) { return false; } #endif -extern int user_shm_lock(size_t, struct user_struct *); -extern void user_shm_unlock(size_t, struct user_struct *); +extern int user_shm_lock(size_t, struct ucounts *); +extern void user_shm_unlock(size_t, struct ucounts *); /* * Parameter block passed down to zap_pte_range in exceptional cases. 
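A practical note on the nth_page() change above: with classic SPARSEMEM (no vmemmap) the memmap is allocated per memory section, so plain `page + n` pointer arithmetic can walk off the end of one section's map. A hypothetical helper that stays correct on either configuration (illustrative only; clear_highpage() comes from linux/highmem.h):

	static void example_zero_pages(struct page *page, unsigned int nr)
	{
		unsigned int i;

		/* nth_page() does pfn arithmetic on classic SPARSEMEM
		 * and plain pointer arithmetic everywhere else. */
		for (i = 0; i < nr; i++)
			clear_highpage(nth_page(page, i));
	}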
@@ -1850,12 +1854,8 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length); -void __set_page_dirty(struct page *, struct address_space *, int warn); -int __set_page_dirty_nobuffers(struct page *page); -int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); -void account_page_dirtied(struct page *page, struct address_space *mapping); void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb); int set_page_dirty(struct page *page); @@ -2420,7 +2420,7 @@ static inline unsigned long free_initmem_default(int poison) extern char __init_begin[], __init_end[]; return free_reserved_area(&__init_begin, &__init_end, - poison, "unused kernel"); + poison, "unused kernel image (initmem)"); } static inline unsigned long get_num_physpages(void) @@ -2460,7 +2460,7 @@ extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA static inline int early_pfn_to_nid(unsigned long pfn) { return 0; @@ -2474,7 +2474,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); -extern void memmap_init_zone(struct zone *zone); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2681,17 +2680,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); -/* Look up the first VMA which intersects the interval start_addr..end_addr-1, - NULL if none. Assume start_addr < end_addr. */ -static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +/** + * find_vma_intersection() - Look up the first VMA which intersects the interval + * @mm: The process address space. + * @start_addr: The inclusive start user address. + * @end_addr: The exclusive end user address. + * + * Returns: The first VMA within the provided range, %NULL otherwise. Assumes + * start_addr < end_addr. + */ +static inline +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) { - struct vm_area_struct * vma = find_vma(mm,start_addr); + struct vm_area_struct *vma = find_vma(mm, start_addr); if (vma && end_addr <= vma->vm_start) vma = NULL; return vma; } +/** + * vma_lookup() - Find a VMA at a specific address + * @mm: The process address space. + * @addr: The user address. + * + * Return: The vm_area_struct at the given address, %NULL otherwise. 
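The vma_lookup() helper defined just below exists to fold the usual "did find_vma() skip past addr" check into one call. A sketch of the conversion it enables (the mmap_lock handling shown is the caller's normal protocol, not something the helper does):

	static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;
		bool mapped;

		mmap_read_lock(mm);
		/* old pattern: vma = find_vma(mm, addr);
		 *              if (vma && addr >= vma->vm_start) ... */
		vma = vma_lookup(mm, addr);
		mapped = vma != NULL;
		mmap_read_unlock(mm);

		return mapped;
	}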
+ */ +static inline +struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = find_vma(mm, addr); + + if (vma && addr < vma->vm_start) + vma = NULL; + + return vma; +} + static inline unsigned long vm_start_gap(struct vm_area_struct *vma) { unsigned long vm_start = vma->vm_start; @@ -3049,6 +3076,11 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) } #endif +int vmemmap_remap_free(unsigned long start, unsigned long end, + unsigned long reuse); +int vmemmap_remap_alloc(unsigned long start, unsigned long end, + unsigned long reuse, gfp_t gfp_mask); + void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8f0fb62e89..52bbd2b7cb 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -96,6 +96,13 @@ struct page { unsigned long private; }; struct { /* page_pool used by netstack */ + /** + * @pp_magic: magic value to avoid recycling non + * page_pool allocated pages. + */ + unsigned long pp_magic; + struct page_pool *pp; + unsigned long _pp_mapping_pad; /** * @dma_addr: might require a 64-bit value on * 32-bit architectures. @@ -397,7 +404,7 @@ struct mm_struct { unsigned long mmap_base; /* base of mmap area */ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES - /* Base adresses for compatible mmap() */ + /* Base addresses for compatible mmap() */ unsigned long mmap_compat_base; unsigned long mmap_compat_legacy_base; #endif @@ -435,16 +442,6 @@ struct mm_struct { */ atomic_t mm_count; - /** - * @has_pinned: Whether this mm has pinned any pages. This can - * be either replaced in the future by @pinned_vm when it - * becomes stable, or grow into a counter on its own. We're - * aggresive on this bit now - even if the pinned pages were - * unpinned later on, we'll still keep this bit set for the - * lifecycle of this mm just for simplicity. - */ - atomic_t has_pinned; - #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif diff --git a/include/linux/mman.h b/include/linux/mman.h index 629cefc4ec..ebb09a9642 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -31,6 +31,8 @@ /* * The historical set of flags that all mmap implementations implicitly * support when a ->mmap_validate() op is not provided in file_operations. + * + * MAP_EXECUTABLE is completely ignored throughout the kernel. */ #define LEGACY_MAP_MASK (MAP_SHARED \ | MAP_PRIVATE \ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 8e21d1ff7d..43164e7567 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -139,6 +139,8 @@ struct sd_scr { unsigned char cmds; #define SD_SCR_CMD20_SUPPORT (1<<0) #define SD_SCR_CMD23_SUPPORT (1<<1) +#define SD_SCR_CMD48_SUPPORT (1<<2) +#define SD_SCR_CMD58_SUPPORT (1<<3) }; struct sd_ssr { @@ -189,6 +191,25 @@ struct sd_switch_caps { #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) }; +struct sd_ext_reg { + u8 fno; + u8 page; + u16 offset; + u8 rev; + u8 feature_enabled; + u8 feature_support; +/* Power Management Function. */ +#define SD_EXT_POWER_OFF_NOTIFY (1<<0) +#define SD_EXT_POWER_SUSTENANCE (1<<1) +#define SD_EXT_POWER_DOWN_MODE (1<<2) +/* Performance Enhancement Function. 
*/ +#define SD_EXT_PERF_FX_EVENT (1<<0) +#define SD_EXT_PERF_CARD_MAINT (1<<1) +#define SD_EXT_PERF_HOST_MAINT (1<<2) +#define SD_EXT_PERF_CACHE (1<<3) +#define SD_EXT_PERF_CMD_QUEUE (1<<4) +}; + struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; @@ -292,6 +313,8 @@ struct mmc_card { struct sd_scr scr; /* extra SD information */ struct sd_ssr ssr; /* yet more SD information */ struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ + struct sd_ext_reg ext_power; /* SD extension reg for PM */ + struct sd_ext_reg ext_perf; /* SD extension reg for PERF */ unsigned int sdio_funcs; /* number of SDIO functions */ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c7e7b43600..0abd47e9ef 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -632,6 +632,6 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) } int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); -int mmc_abort_tuning(struct mmc_host *host, u32 opcode); +int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); #endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 2236aa540f..6727576a87 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -29,6 +29,10 @@ #define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ #define SD_APP_SEND_SCR 51 /* adtc R1 */ + /* class 11 */ +#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */ +#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */ + /* OCR bit definitions */ #define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ #define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 5d0767cb42..1935d4c72d 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -9,8 +9,7 @@ struct page; struct vm_area_struct; struct mm_struct; -extern void dump_page(struct page *page, const char *reason); -extern void __dump_page(struct page *page, const char *reason); +void dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1a6a9eb6d3..45fc2c81e3 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -33,7 +33,7 @@ struct mmu_interval_notifier; * * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same * access flags). User should soft dirty the page in the end callback to make - * sure that anyone relying on soft dirtyness catch pages that might be written + * sure that anyone relying on soft dirtiness catch pages that might be written * through non CPU mappings. * * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal @@ -41,7 +41,12 @@ struct mmu_interval_notifier; * * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal * a device driver to possibly ignore the invalidation if the - * migrate_pgmap_owner field matches the driver's device private pgmap owner. + * owner field matches the driver's device private pgmap owner. + * + * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no + * longer have exclusive access to the page. When sent during creation of an + * exclusive range the owner will be initialised to the value provided by the + * caller of make_device_exclusive_range(), otherwise the owner will be NULL. 
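To make the owner semantics concrete, here is a sketch of a device driver's interval-notifier callback that skips invalidations the driver itself raised; the cookie and the skip policy are illustrative, not taken from this patch:

	static void *example_owner_tag = &example_owner_tag;	/* driver-private cookie */

	static bool example_invalidate(struct mmu_interval_notifier *sub,
				       const struct mmu_notifier_range *range,
				       unsigned long cur_seq)
	{
		/* Our own make_device_exclusive_range() call carried our
		 * tag, so nothing needs to be torn down for this event. */
		if (range->event == MMU_NOTIFY_EXCLUSIVE &&
		    range->owner == example_owner_tag)
			return true;

		mmu_interval_set_seq(sub, cur_seq);
		/* ... invalidate the device's mappings for the range ... */
		return true;
	}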
*/ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, @@ -51,6 +56,7 @@ enum mmu_notifier_event { MMU_NOTIFY_SOFT_DIRTY, MMU_NOTIFY_RELEASE, MMU_NOTIFY_MIGRATE, + MMU_NOTIFY_EXCLUSIVE, }; #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) @@ -161,7 +167,7 @@ struct mmu_notifier_ops { * decrease the refcount. If the refcount is decreased on * invalidate_range_start() then the VM can free pages as page * table entries are removed. If the refcount is only - * droppped on invalidate_range_end() then the driver itself + * dropped on invalidate_range_end() then the driver itself * will drop the last refcount but it must take care to flush * any secondary tlb before doing the final free on the * page. Pages will no longer be referenced by the linux @@ -190,7 +196,7 @@ struct mmu_notifier_ops { * If invalidate_range() is used to manage a non-CPU TLB with * shared page-tables, it not necessary to implement the * invalidate_range_start()/end() notifiers, as - * invalidate_range() alread catches the points in time when an + * invalidate_range() already catches the points in time when an * external TLB range needs to be flushed. For more in depth * discussion on this see Documentation/vm/mmu_notifier.rst * @@ -269,7 +275,7 @@ struct mmu_notifier_range { unsigned long end; unsigned flags; enum mmu_notifier_event event; - void *migrate_pgmap_owner; + void *owner; }; static inline int mm_has_notifiers(struct mm_struct *mm) @@ -363,7 +369,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub, * mmu_interval_read_retry() will return true. * * False is not reliable and only suggests a collision may not have - * occured. It can be called many times and does not have to hold the user + * occurred. It can be called many times and does not have to hold the user * provided lock. * * This call can be used as part of loops and other expensive operations to @@ -521,14 +527,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, range->flags = flags; } -static inline void mmu_notifier_range_init_migrate( - struct mmu_notifier_range *range, unsigned int flags, +static inline void mmu_notifier_range_init_owner( + struct mmu_notifier_range *range, + enum mmu_notifier_event event, unsigned int flags, struct vm_area_struct *vma, struct mm_struct *mm, - unsigned long start, unsigned long end, void *pgmap) + unsigned long start, unsigned long end, void *owner) { - mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm, - start, end); - range->migrate_pgmap_owner = pgmap; + mmu_notifier_range_init(range, event, flags, vma, mm, start, end); + range->owner = owner; } #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ @@ -655,8 +661,8 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ _mmu_notifier_range_init(range, start, end) -#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \ - pgmap) \ +#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \ + end, owner) \ _mmu_notifier_range_init(range, start, end) static inline bool diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 0d53eba1c3..fcb5355600 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -20,6 +20,7 @@ #include #include #include +#include #include /* Free memory management - zoned buddy allocator. 
*/ @@ -113,7 +114,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype) struct pglist_data; /* - * Add a wild amount of padding here to ensure datas fall into separate + * Add a wild amount of padding here to ensure data fall into separate * cachelines. There are very few zone structures in the machine, so space * consumption is not a concern here. */ @@ -134,10 +135,10 @@ enum numa_stat_item { NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ NUMA_LOCAL, /* allocation from local node */ NUMA_OTHER, /* allocation from other node */ - NR_VM_NUMA_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS }; #else -#define NR_VM_NUMA_STAT_ITEMS 0 +#define NR_VM_NUMA_EVENT_ITEMS 0 #endif enum zone_stat_item { @@ -332,29 +333,55 @@ enum zone_watermarks { NR_WMARK }; +/* + * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional + * for pageblock size for THP if configured. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define NR_PCP_THP 1 +#else +#define NR_PCP_THP 0 +#endif +#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP)) + +/* + * Shift to encode migratetype and order in the same integer, with order + * in the least significant bits. + */ +#define NR_PCP_ORDER_WIDTH 8 +#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1) #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) +/* Fields and list protected by pagesets local_lock in page_alloc.c */ struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ + short free_factor; /* batch scaling factor during free */ +#ifdef CONFIG_NUMA + short expire; /* When 0, remote pagesets are drained */ +#endif /* Lists of pages, one per migrate type stored on the pcp-lists */ - struct list_head lists[MIGRATE_PCPTYPES]; + struct list_head lists[NR_PCP_LISTS]; }; -struct per_cpu_pageset { - struct per_cpu_pages pcp; -#ifdef CONFIG_NUMA - s8 expire; - u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS]; -#endif +struct per_cpu_zonestat { #ifdef CONFIG_SMP - s8 stat_threshold; s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; + s8 stat_threshold; +#endif +#ifdef CONFIG_NUMA + /* + * Low priority inaccurate counters that are only folded + * on demand. Use a large type to avoid the overhead of + * folding during refresh_cpu_vm_stats. + */ + unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; #endif }; @@ -484,7 +511,8 @@ struct zone { int node; #endif struct pglist_data *zone_pgdat; - struct per_cpu_pageset __percpu *pageset; + struct per_cpu_pages __percpu *per_cpu_pageset; + struct per_cpu_zonestat __percpu *per_cpu_zonestats; /* * the high and batch values are copied to individual pagesets for * faster access @@ -619,7 +647,7 @@ struct zone { ZONE_PADDING(_pad3_) /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; - atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; + atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; } ____cacheline_internodealigned_in_smp; enum pgdat_flags { @@ -637,6 +665,7 @@ enum zone_flags { ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. * Cleared when kswapd is woken. */ + ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone.
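Back in the per-cpu list changes above, NR_PCP_ORDER_WIDTH and NR_PCP_ORDER_MASK describe how a migratetype and an order are packed into one integer, with the order in the low bits. Illustrative encode/decode helpers (page_alloc.c keeps its own internal equivalents; this is only a sketch of the encoding):

	static inline unsigned int example_encode(unsigned int migratetype,
						  unsigned int order)
	{
		return (migratetype << NR_PCP_ORDER_WIDTH) | order;
	}

	static inline unsigned int example_order(unsigned int val)
	{
		return val & NR_PCP_ORDER_MASK;
	}

	static inline unsigned int example_migratetype(unsigned int val)
	{
		return val >> NR_PCP_ORDER_WIDTH;
	}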
*/ }; static inline unsigned long zone_managed_pages(struct zone *zone) @@ -738,10 +767,12 @@ struct zonelist { struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; }; -#ifndef CONFIG_DISCONTIGMEM -/* The array of struct pages - for discontigmem use pgdat->lmem_map */ +/* + * The array of struct pages for flatmem. + * It must be declared for SPARSEMEM as well because there are configurations + * that rely on that. + */ extern struct page *mem_map; -#endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct deferred_split { @@ -775,7 +806,7 @@ typedef struct pglist_data { struct zonelist node_zonelists[MAX_ZONELISTS]; int nr_zones; /* number of populated zones in this node */ -#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ +#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ struct page *node_mem_map; #ifdef CONFIG_PAGE_EXTENSION struct page_ext *node_page_ext; @@ -865,7 +896,7 @@ typedef struct pglist_data { #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) -#ifdef CONFIG_FLAT_NODE_MEM_MAP +#ifdef CONFIG_FLATMEM #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) #else #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) @@ -982,22 +1013,11 @@ static inline void zone_set_nid(struct zone *zone, int nid) {} extern int movable_zone; -#ifdef CONFIG_HIGHMEM -static inline int zone_movable_is_highmem(void) -{ -#ifdef CONFIG_NEED_MULTIPLE_NODES - return movable_zone == ZONE_HIGHMEM; -#else - return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; -#endif -} -#endif - static inline int is_highmem_idx(enum zone_type idx) { #ifdef CONFIG_HIGHMEM return (idx == ZONE_HIGHMEM || - (idx == ZONE_MOVABLE && zone_movable_is_highmem())); + (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM)); #else return 0; #endif @@ -1029,7 +1049,7 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); -int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, +int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); @@ -1037,21 +1057,24 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int numa_zonelist_order_handler(struct ctl_table *, int, void *, size_t *, loff_t *); -extern int percpu_pagelist_fraction; +extern int percpu_pagelist_high_fraction; extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA extern struct pglist_data contig_page_data; -#define NODE_DATA(nid) (&contig_page_data) +static inline struct pglist_data *NODE_DATA(int nid) +{ + return &contig_page_data; +} #define NODE_MEM_MAP(nid) mem_map -#else /* CONFIG_NEED_MULTIPLE_NODES */ +#else /* CONFIG_NUMA */ #include -#endif /* !CONFIG_NEED_MULTIPLE_NODES */ +#endif /* !CONFIG_NUMA */ extern struct pglist_data *first_online_pgdat(void); extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); @@ -1200,8 +1223,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #ifdef CONFIG_SPARSEMEM /* - * SECTION_SHIFT #bits space required to store a section # - * * PA_SECTION_SHIFT physical address to/from section number * PFN_SECTION_SHIFT pfn to/from section number 
*/ @@ -1427,10 +1448,30 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) #endif #ifndef CONFIG_HAVE_ARCH_PFN_VALID +/** + * pfn_valid - check if there is a valid memory map entry for a PFN + * @pfn: the page frame number to check + * + * Check if there is a valid memory map entry aka struct page for the @pfn. + * Note that availability of the memory map entry does not imply that + * there is actual usable memory at that @pfn. The struct page may + * represent a hole or an unusable page frame. + * + * Return: 1 for PFNs that have memory map entries and 0 otherwise + */ static inline int pfn_valid(unsigned long pfn) { struct mem_section *ms; + /* + * Ensure the upper PAGE_SHIFT bits are clear in the + * pfn. Else it might lead to false positives when + * some of the upper bits are set, but the lower bits + * match a valid pfn. + */ + if (PHYS_PFN(PFN_PHYS(pfn)) != pfn) + return 0; + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; ms = __nr_to_section(pfn_to_section_nr(pfn)); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 7d45b5f989..8e291cfdaf 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -447,6 +447,7 @@ struct hv_vmbus_device_id { struct rpmsg_device_id { char name[RPMSG_NAME_SIZE]; + kernel_ulong_t driver_data; }; /* i2c */ diff --git a/include/linux/module.h b/include/linux/module.h index 8100bb477d..8a298d820d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -11,6 +11,7 @@ #include #include +#include <linux/buildid.h> #include #include #include @@ -369,6 +370,11 @@ struct module { /* Unique handle for this module */ char name[MODULE_NAME_LEN]; +#ifdef CONFIG_STACKTRACE_BUILD_ID + /* Module build ID */ + unsigned char build_id[BUILD_ID_SIZE_MAX]; +#endif + /* Sysfs stuff. */ struct module_kobject mkobj; struct module_attribute *modinfo_attrs; @@ -636,7 +642,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr); const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, - char **modname, + char **modname, const unsigned char **modbuildid, char *namebuf); int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -740,6 +746,7 @@ static inline const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, + const unsigned char **modbuildid, char *namebuf) { return NULL; diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 3e5358f4de..eb0d1c1db2 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -200,7 +200,7 @@ struct mpi_ec_ctx { unsigned int nbits; /* Number of bits. */ /* Domain parameters. Note that they may not all be set and if set - * the MPIs may be flaged as constant. + * the MPIs may be flagged as constant. */ MPI p; /* Prime specifying the field GF(p). */ MPI a; /* First coefficient of the Weierstrass equation.
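Returning to the pfn_valid() hunk above: the round-trip guard works because PFN_PHYS() shifts the pfn left by PAGE_SHIFT within phys_addr_t, silently discarding the top PAGE_SHIFT bits. A worked example, assuming a 64-bit phys_addr_t and PAGE_SHIFT == 12:

	unsigned long pfn = (1UL << 52) | 0x1234;	/* bogus upper bit set */

	/* PFN_PHYS(pfn) == 0x1234000: bit 52 was shifted out of range */
	/* PHYS_PFN(PFN_PHYS(pfn)) == 0x1234 != pfn, so pfn_valid() returns 0 */

Without the check, such a pfn would alias the genuinely valid pfn 0x1234 and pass the section lookup.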
*/ @@ -267,7 +267,7 @@ int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx); /** * mpi_get_size() - returns max size required to store the number * - * @a: A multi precision integer for which we want to allocate a bufer + * @a: A multi precision integer for which we want to allocate a buffer * * Return: size required to store the number */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index a89955f3cb..88227044fc 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -380,6 +380,8 @@ struct mtd_info { int usecount; struct mtd_debug_info dbg; struct nvmem_device *nvmem; + struct nvmem_device *otp_user_nvmem; + struct nvmem_device *otp_factory_nvmem; /* * Parent device from the MTD partition point of view. diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h index 339ac79856..a7376f9bed 100644 --- a/include/linux/mtd/onfi.h +++ b/include/linux/mtd/onfi.h @@ -11,6 +11,7 @@ #define __LINUX_MTD_ONFI_H #include +#include <linux/bitfield.h> /* ONFI version bits */ #define ONFI_VERSION_1_0 BIT(1) @@ -24,17 +25,22 @@ #define ONFI_VERSION_4_0 BIT(9) /* ONFI features */ -#define ONFI_FEATURE_16_BIT_BUS (1 << 0) -#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) +#define ONFI_FEATURE_16_BIT_BUS BIT(0) +#define ONFI_FEATURE_NV_DDR BIT(5) +#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7) /* ONFI timing mode, used in both asynchronous and synchronous mode */ -#define ONFI_TIMING_MODE_0 (1 << 0) -#define ONFI_TIMING_MODE_1 (1 << 1) -#define ONFI_TIMING_MODE_2 (1 << 2) -#define ONFI_TIMING_MODE_3 (1 << 3) -#define ONFI_TIMING_MODE_4 (1 << 4) -#define ONFI_TIMING_MODE_5 (1 << 5) -#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) +#define ONFI_DATA_INTERFACE_SDR 0 +#define ONFI_DATA_INTERFACE_NVDDR BIT(4) +#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5) +#define ONFI_TIMING_MODE_0 BIT(0) +#define ONFI_TIMING_MODE_1 BIT(1) +#define ONFI_TIMING_MODE_2 BIT(2) +#define ONFI_TIMING_MODE_3 BIT(3) +#define ONFI_TIMING_MODE_4 BIT(4) +#define ONFI_TIMING_MODE_5 BIT(5) +#define ONFI_TIMING_MODE_UNKNOWN BIT(6) +#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x)) /* ONFI feature number/address */ #define ONFI_FEATURE_NUMBER 256 @@ -49,7 +55,7 @@ #define ONFI_SUBFEATURE_PARAM_LEN 4 /* ONFI optional commands SET/GET FEATURES supported?
*/ -#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) +#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2) struct nand_onfi_params { /* rev info and features block */ @@ -93,14 +99,15 @@ struct nand_onfi_params { /* electrical parameter block */ u8 io_pin_capacitance_max; - __le16 async_timing_mode; + __le16 sdr_timing_modes; __le16 program_cache_timing_mode; __le16 t_prog; __le16 t_bers; __le16 t_r; __le16 t_ccs; - __le16 src_sync_timing_mode; - u8 src_ssync_features; + u8 nvddr_timing_modes; + u8 nvddr2_timing_modes; + u8 nvddr_nvddr2_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; @@ -160,7 +167,9 @@ struct onfi_ext_param_page { * @tBERS: Block erase time * @tR: Page read time * @tCCS: Change column setup time - * @async_timing_mode: Supported asynchronous timing mode + * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only) + * @sdr_timing_modes: Supported asynchronous/SDR timing modes + * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes * @vendor_revision: Vendor specific revision number * @vendor: Vendor specific data */ @@ -170,7 +179,9 @@ struct onfi_params { u16 tBERS; u16 tR; u16 tCCS; - u16 async_timing_mode; + bool fast_tCAD; + u16 sdr_timing_modes; + u16 nvddr_timing_modes; u16 vendor_revision; u8 vendor[88]; }; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 29df2f43dc..b2f9dd3cbd 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -24,6 +24,7 @@ #include struct nand_chip; +struct gpio_desc; /* The maximum number of NAND chips in an array */ #define NAND_MAX_CHIPS 8 @@ -385,8 +386,8 @@ struct nand_ecc_ctrl { * This struct defines the timing requirements of a SDR NAND chip. * These information can be found in every NAND datasheets and the timings * meaning are described in the ONFI specifications: - * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing - * Parameters) + * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf + * (chapter 4.15 Timing Parameters) * * All these timings are expressed in picoseconds. * @@ -471,12 +472,128 @@ struct nand_sdr_timings { u32 tWW_min; }; +/** + * struct nand_nvddr_timings - NV-DDR NAND chip timings + * + * This struct defines the timing requirements of a NV-DDR NAND data interface. + * These information can be found in every NAND datasheets and the timings + * meaning are described in the ONFI specifications: + * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf + * (chapter 4.18.2 NV-DDR) + * + * All these timings are expressed in picoseconds. 
+ * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time + * @tAC_min: Access window of DQ[7:0] from CLK + * @tAC_max: Access window of DQ[7:0] from CLK + * @tADL_min: ALE to data loading time + * @tCAD_min: Command, Address, Data delay + * @tCAH_min: Command/Address DQ hold time + * @tCALH_min: W/R_n, CLE and ALE hold time + * @tCALS_min: W/R_n, CLE and ALE setup time + * @tCAS_min: Command/address DQ setup time + * @tCEH_min: CE# high hold time + * @tCH_min: CE# hold time + * @tCK_min: Average clock cycle time + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDQSCK_min: Start of the access window of DQS from CLK + * @tDQSCK_max: End of the access window of DQS from CLK + * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device + * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device + * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device + * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access + * @tDS_min: Data setup time + * @tDSC_min: DQS cycle time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tITC_max: Interface and Timing Mode Change time + * @tQHS_max: Data hold skew factor + * @tRHW_min: Data output cycle to command, address, or data input cycle + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. + * @tWB_max: WE# high to SR[6] low + * @tWHR_min: WE# high to RE# low + * @tWRCK_min: W/R_n low to data output cycle + * @tWW_min: WP# transition to WE# low + */ +struct nand_nvddr_timings { + u64 tBERS_max; + u32 tCCS_min; + u64 tPROG_max; + u64 tR_max; + u32 tAC_min; + u32 tAC_max; + u32 tADL_min; + u32 tCAD_min; + u32 tCAH_min; + u32 tCALH_min; + u32 tCALS_min; + u32 tCAS_min; + u32 tCEH_min; + u32 tCH_min; + u32 tCK_min; + u32 tCS_min; + u32 tDH_min; + u32 tDQSCK_min; + u32 tDQSCK_max; + u32 tDQSD_min; + u32 tDQSD_max; + u32 tDQSHZ_max; + u32 tDQSQ_max; + u32 tDS_min; + u32 tDSC_min; + u32 tFEAT_max; + u32 tITC_max; + u32 tQHS_max; + u32 tRHW_min; + u32 tRR_min; + u32 tRST_max; + u32 tWB_max; + u32 tWHR_min; + u32 tWRCK_min; + u32 tWW_min; +}; + +/* + * While timings related to the data interface itself are mostly different + * between SDR and NV-DDR, timings related to the internal chip behavior are + * common. IOW, the following entries which describe the internal delays have + * the same definition and are shared in both SDR and NV-DDR timing structures: + * - tADL_min + * - tBERS_max + * - tCCS_min + * - tFEAT_max + * - tPROG_max + * - tR_max + * - tRR_min + * - tRST_max + * - tWB_max + * + * The below macros return the value of a given timing, no matter the interface. + */ +#define NAND_COMMON_TIMING_PS(conf, timing_name) \ + nand_interface_is_sdr(conf) ? 
\ + nand_get_sdr_timings(conf)->timing_name : \ + nand_get_nvddr_timings(conf)->timing_name + +#define NAND_COMMON_TIMING_MS(conf, timing_name) \ + PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name)) + +#define NAND_COMMON_TIMING_NS(conf, timing_name) \ + PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name)) + /** * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface + * @NAND_NVDDR_IFACE: Double Data Rate interface */ enum nand_interface_type { NAND_SDR_IFACE, + NAND_NVDDR_IFACE, }; /** @@ -485,6 +602,7 @@ enum nand_interface_type { * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. + * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE. */ struct nand_interface_config { enum nand_interface_type type; @@ -492,10 +610,29 @@ struct nand_interface_config { unsigned int mode; union { struct nand_sdr_timings sdr; + struct nand_nvddr_timings nvddr; }; } timings; }; +/** + * nand_interface_is_sdr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_sdr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_SDR_IFACE; +} + +/** + * nand_interface_is_nvddr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_nvddr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_NVDDR_IFACE; +} + /** * nand_get_sdr_timings - get SDR timing from data interface * @conf: The data interface @@ -503,12 +640,25 @@ struct nand_interface_config { static inline const struct nand_sdr_timings * nand_get_sdr_timings(const struct nand_interface_config *conf) { - if (conf->type != NAND_SDR_IFACE) + if (!nand_interface_is_sdr(conf)) return ERR_PTR(-EINVAL); return &conf->timings.sdr; } +/** + * nand_get_nvddr_timings - get NV-DDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_nvddr_timings * +nand_get_nvddr_timings(const struct nand_interface_config *conf) +{ + if (!nand_interface_is_nvddr(conf)) + return ERR_PTR(-EINVAL); + + return &conf->timings.nvddr; +} + /** * struct nand_op_cmd_instr - Definition of a command instruction * @opcode: the command to issue in one cycle @@ -1413,7 +1563,6 @@ void nand_cleanup(struct nand_chip *chip); * instruction and have no physical pin to check it. */ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); -struct gpio_desc; int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms); @@ -1446,4 +1595,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip) return chip->data_buf; } +/* Parse the gpio-cs property */ +int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array, + unsigned int *ncs_array); + #endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 98ed91b529..f67457748e 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -383,6 +383,7 @@ struct spi_nor_flash_parameter; * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations + * @sfdp: the SFDP data of the flash * @controller_ops: SPI NOR controller driver specific operations. * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. 
* The structure includes legacy flash parameters and @@ -412,6 +413,7 @@ struct spi_nor { bool sst_write_second; u32 flags; enum spi_nor_cmd_ext cmd_ext_type; + struct sfdp *sfdp; const struct spi_nor_controller_ops *controller_ops; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 6bb92f2683..6988956b84 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -170,6 +170,28 @@ struct spinand_op; struct spinand_device; #define SPINAND_MAX_ID_LEN 4 +/* + * For erase, write and read operation, we got the following timings : + * tBERS (erase) 1ms to 4ms + * tPROG 300us to 400us + * tREAD 25us to 100us + * In order to minimize latency, the min value is divided by 4 for the + * initial delay, and dividing by 20 for the poll delay. + * For reset, 5us/10us/500us if the device is respectively + * reading/programming/erasing when the RESET occurs. Since we always + * issue a RESET when the device is IDLE, 5us is selected for both initial + * and poll delay. + */ +#define SPINAND_READ_INITIAL_DELAY_US 6 +#define SPINAND_READ_POLL_DELAY_US 5 +#define SPINAND_RESET_INITIAL_DELAY_US 5 +#define SPINAND_RESET_POLL_DELAY_US 5 +#define SPINAND_WRITE_INITIAL_DELAY_US 75 +#define SPINAND_WRITE_POLL_DELAY_US 15 +#define SPINAND_ERASE_INITIAL_DELAY_US 250 +#define SPINAND_ERASE_POLL_DELAY_US 50 + +#define SPINAND_WAITRDY_TIMEOUT_MS 400 /** * struct spinand_id - SPI NAND id structure diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 47e5679b48..000b126acf 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -918,12 +918,4 @@ extern void mv64340_irq_init(unsigned int base); -/* Watchdog Platform Device, Driver Data */ -#define MV64x60_WDT_NAME "mv64x60_wdt" - -struct mv64x60_wdt_pdata { - int timeout; /* watchdog expiry in seconds, default 10 */ - int bus_clk; /* bus clock in MHz, default 133 */ -}; - #endif /* __ASM_MV643XX_H */ diff --git a/include/linux/namei.h b/include/linux/namei.h index b9605b2b46..be9a2b349c 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -36,9 +36,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; /* internal use only */ #define LOOKUP_PARENT 0x0010 -#define LOOKUP_JUMPED 0x1000 -#define LOOKUP_ROOT 0x2000 -#define LOOKUP_ROOT_GRABBED 0x0008 /* Scoping flags for lookup. */ #define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. 
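The SPI NAND delay constants a little earlier follow directly from the rule stated in their comment (minimum timing divided by 4 for the initial delay, and by 20 for the poll delay). Working through the arithmetic:

	/* tBERS (erase) min = 1 ms:   1000 us / 4  = 250 us initial delay
	 *                             1000 us / 20 =  50 us poll delay
	 * tPROG (write) min = 300 us:  300 us / 4  =  75 us initial delay
	 *                              300 us / 20 =  15 us poll delay
	 * tREAD (read)  min = 25 us:    25 us / 4  ~=  6 us initial delay */

These match SPINAND_ERASE_INITIAL_DELAY_US / SPINAND_ERASE_POLL_DELAY_US, the WRITE pair, and the READ initial delay defined above.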
*/ diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index f41387a896..6b3267b497 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -4,6 +4,8 @@ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ +#include <linux/auxiliary_bus.h> + #define I40E_CLIENT_STR_LENGTH 10 /* Client interface version should be updated anytime there is a change in the @@ -48,7 +50,7 @@ struct i40e_qv_info { struct i40e_qvlist_info { u32 num_vectors; - struct i40e_qv_info qv_info[1]; + struct i40e_qv_info qv_info[]; }; @@ -78,6 +80,7 @@ struct i40e_info { u8 lanmac[6]; struct net_device *netdev; struct pci_dev *pcidev; + struct auxiliary_device *aux_dev; u8 __iomem *hw_addr; u8 fid; /* function id, PF id or VF id */ #define I40E_CLIENT_FTYPE_PF 0 @@ -100,6 +103,11 @@ struct i40e_info { u32 fw_build; /* firmware build number */ }; +struct i40e_auxiliary_device { + struct auxiliary_device aux_dev; + struct i40e_info *ldev; +}; + #define I40E_CLIENT_RESET_LEVEL_PF 1 #define I40E_CLIENT_RESET_LEVEL_CORE 2 #define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) @@ -187,8 +195,7 @@ static inline bool i40e_client_is_registered(struct i40e_client *client) return test_bit(__I40E_CLIENT_REGISTERED, &client->state); } -/* used by clients */ -int i40e_register_client(struct i40e_client *client); -int i40e_unregister_client(struct i40e_client *client); +void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client); +void i40e_client_device_unregister(struct i40e_info *ldev); #endif /* _I40E_CLIENT_H_ */ diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h new file mode 100644 index 0000000000..e32f6712ae --- /dev/null +++ b/include/linux/net/intel/iidc.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021, Intel Corporation.
*/ + +#ifndef _IIDC_H_ +#define _IIDC_H_ + +#include <linux/auxiliary_bus.h> +#include <linux/dcbnl.h> +#include +#include +#include +#include + +enum iidc_event_type { + IIDC_EVENT_BEFORE_MTU_CHANGE, + IIDC_EVENT_AFTER_MTU_CHANGE, + IIDC_EVENT_BEFORE_TC_CHANGE, + IIDC_EVENT_AFTER_TC_CHANGE, + IIDC_EVENT_CRIT_ERR, + IIDC_EVENT_NBITS /* must be last */ +}; + +enum iidc_reset_type { + IIDC_PFR, + IIDC_CORER, + IIDC_GLOBR, +}; + +#define IIDC_MAX_USER_PRIORITY 8 + +/* Struct to hold per RDMA Qset info */ +struct iidc_rdma_qset_params { + /* Qset TEID returned to the RDMA driver in + * ice_add_rdma_qset and used by RDMA driver + * for calls to ice_del_rdma_qset + */ + u32 teid; /* Qset TEID */ + u16 qs_handle; /* RDMA driver provides this */ + u16 vport_id; /* VSI index */ + u8 tc; /* TC branch the Qset should belong to */ +}; + +struct iidc_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +/* Struct to pass QoS info */ +struct iidc_qos_params { + struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[IIDC_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u8 num_tc; +}; + +struct iidc_event { + DECLARE_BITMAP(type, IIDC_EVENT_NBITS); + u32 reg; +}; + +struct ice_pf; + +int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); +int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); +int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type); +int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable); +void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos); + +#define IIDC_RDMA_ROCE_NAME "roce" + +/* Structure representing auxiliary driver tailored information about the core + * PCI dev; each auxiliary driver using the IIDC interface will have an + * instance of this struct dedicated to it. + */ + +struct iidc_auxiliary_dev { + struct auxiliary_device adev; + struct ice_pf *pf; +}; + +/* structure representing the auxiliary driver. This struct is to be + * allocated and populated by the auxiliary driver's owner. The core PCI + * driver will access these ops by performing a container_of on the + * auxiliary_device->dev.driver. + */ +struct iidc_auxiliary_drv { + struct auxiliary_driver adrv; + /* This event_handler is meant to be a blocking call. For instance, + * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not + * return until the auxiliary driver is ready for the MTU change to + * happen. + */ + void (*event_handler)(struct ice_pf *pf, struct iidc_event *event); +}; + +#endif /* _IIDC_H_*/ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 3de38d6a0a..2c6b9e4162 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -93,7 +93,7 @@ enum { /* * Add your fresh new feature above and remember to update - * netdev_features_strings[] in net/core/ethtool.c and maybe + * netdev_features_strings[] in net/ethtool/common.c and maybe * some feature mask #defines below. Please also describe it * in Documentation/networking/netdev-features.rst.
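Tying the IIDC header above together: an RDMA-side auxiliary driver is expected to embed its auxiliary_device inside iidc_auxiliary_dev and recover the PF handle in probe with container_of(). A sketch (the function name and body are illustrative, not from the ice driver):

	static int example_aux_probe(struct auxiliary_device *adev,
				     const struct auxiliary_device_id *id)
	{
		struct iidc_auxiliary_dev *iadev =
			container_of(adev, struct iidc_auxiliary_dev, adev);
		struct iidc_qos_params qos;

		ice_get_qos_params(iadev->pf, &qos);
		/* ... allocate RDMA resources, then add qsets with
		 * ice_add_rdma_qset(iadev->pf, &qset) as needed ... */
		return 0;
	}

The matching iidc_auxiliary_drv wrapper is where the driver supplies its blocking event_handler.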
*/ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5cbc950b34..eaf5bb008a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4114,7 +4114,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, return NET_RX_DROP; } - skb_scrub_packet(skb, true); + skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); skb->priority = 0; return 0; } @@ -4187,8 +4187,8 @@ unsigned long dev_trans_start(struct net_device *dev); void __netdev_watchdog_up(struct net_device *dev); void netif_carrier_on(struct net_device *dev); - void netif_carrier_off(struct net_device *dev); +void netif_carrier_event(struct net_device *dev); /** * netif_dormant_on - mark device as dormant. diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index f0f3a8354c..3fda1a5087 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -65,8 +65,8 @@ struct nf_hook_ops; struct sock; struct nf_hook_state { - unsigned int hook; - u_int8_t pf; + u8 hook; + u8 pf; struct net_device *in; struct net_device *out; struct sock *sk; @@ -77,12 +77,18 @@ struct nf_hook_state { typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED, + NF_HOOK_OP_NF_TABLES, +}; + struct nf_hook_ops { /* User fills in from here down. */ nf_hookfn *hook; struct net_device *dev; void *priv; - u_int8_t pf; + u8 pf; + enum nf_hook_ops_type hook_ops_type:8; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 515ce53aa2..241e005f29 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -11,6 +11,7 @@ struct nfnl_info { struct net *net; struct sock *sk; const struct nlmsghdr *nlh; + const struct nfgenmsg *nfmsg; struct netlink_ext_ack *extack; }; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ffba254d20..ce64745948 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -84,6 +84,7 @@ struct nfs_open_context { #define NFS_CONTEXT_RESEND_WRITES (1) #define NFS_CONTEXT_BAD (2) #define NFS_CONTEXT_UNLOCK (3) +#define NFS_CONTEXT_FILE_OPEN (4) int error; struct list_head list; diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h index f5ba0fbff7..222ae8883e 100644 --- a/include/linux/nfs_ssc.h +++ b/include/linux/nfs_ssc.h @@ -8,6 +8,7 @@ */ #include +#include <linux/sunrpc/svc.h> extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl; @@ -52,6 +53,19 @@ static inline void nfs42_ssc_close(struct file *filep) if (nfs_ssc_client_tbl.ssc_nfs4_ops) (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep); } + +struct nfsd4_ssc_umount_item { + struct list_head nsui_list; + bool nsui_busy; + /* + * nsui_refcnt inited to 2, 1 on list and 1 for consumer. Entry + * is removed when refcnt drops to 1 and nsui_expire expires.
+ */ + refcount_t nsui_refcnt; + unsigned long nsui_expire; + struct vfsmount *nsui_vfsmount; + char nsui_ipaddr[RPC_MAX_ADDRBUFLEN]; +}; #endif /* diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 717ecc87c9..e9698b6278 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -277,6 +277,7 @@ struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; const struct cred *cred; + struct pnfs_layout_hdr *lo; gfp_t gfp_flags; }; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index ac398e143c..567c3ddba2 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -119,7 +119,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m) * The inline keyword gives the compiler room to decide to inline, or * not inline a function as it sees best. However, as these functions * are called in both __init and non-__init functions, if they are not - * inlined we will end up with a section mis-match error (of the type of + * inlined we will end up with a section mismatch error (of the type of * freeable items not being freed). So we must use __always_inline here * to fix the problem. If other functions in the future also end up in * this situation they will also need to be annotated as __always_inline @@ -515,7 +515,7 @@ static inline int node_random(const nodemask_t *mask) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) /* - * For nodemask scrach area. + * For nodemask scratch area. * NODEMASK_ALLOC(type, name) allocates an object with a specified type and * name. */ @@ -528,7 +528,7 @@ static inline int node_random(const nodemask_t *mask) #define NODEMASK_FREE(m) do {} while (0) #endif -/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ +/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */ struct nodemask_scratch { nodemask_t mask1; nodemask_t mask2; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index edcbd60b88..b7c4c4130b 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -636,8 +636,8 @@ struct nvme_lba_range_type { __u8 type; __u8 attributes; __u8 rsvd2[14]; - __u64 slba; - __u64 nlb; + __le64 slba; + __le64 nlb; __u8 guid[16]; __u8 rsvd48[16]; }; @@ -944,6 +944,13 @@ struct nvme_zone_mgmt_recv_cmd { enum { NVME_ZRA_ZONE_REPORT = 0, NVME_ZRASF_ZONE_REPORT_ALL = 0, + NVME_ZRASF_ZONE_STATE_EMPTY = 0x01, + NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02, + NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03, + NVME_ZRASF_ZONE_STATE_CLOSED = 0x04, + NVME_ZRASF_ZONE_STATE_READONLY = 0x05, + NVME_ZRASF_ZONE_STATE_FULL = 0x06, + NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07, NVME_REPORT_ZONE_PARTIAL = 1, }; @@ -1504,6 +1511,7 @@ enum { NVME_SC_NS_WRITE_PROTECTED = 0x20, NVME_SC_CMD_INTERRUPTED = 0x21, NVME_SC_TRANSIENT_TR_ERR = 0x22, + NVME_SC_INVALID_IO_CMD_SET = 0x2C, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index e162b757b6..104505e902 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -25,6 +25,7 @@ enum nvmem_type { NVMEM_TYPE_EEPROM, NVMEM_TYPE_OTP, NVMEM_TYPE_BATTERY_BACKED, + NVMEM_TYPE_FRAM, }; #define NVMEM_DEVID_NONE (-1) @@ -57,6 +58,7 @@ struct nvmem_keepout { * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. + * @of_node: If given, this will be used instead of the parent's of_node. 
* @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. @@ -86,6 +88,7 @@ struct nvmem_config { enum nvmem_type type; bool read_only; bool root_only; + struct device_node *of_node; bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; diff --git a/include/linux/of.h b/include/linux/of.h index d8db8d3592..9c2e71e202 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1329,6 +1329,12 @@ static inline int of_get_available_child_count(const struct device_node *np) return num; } +#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __attribute__((unused)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? fn : fn } + #if defined(CONFIG_OF) && !defined(MODULE) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ @@ -1338,10 +1344,7 @@ static inline int of_get_available_child_count(const struct device_node *np) .data = (fn == (fn_type)NULL) ? fn : fn } #else #define _OF_DECLARE(table, name, compat, fn, fn_type) \ - static const struct of_device_id __of_table_##name \ - __attribute__((unused)) \ - = { .compatible = compat, \ - .data = (fn == (fn_type)NULL) ? fn : fn } + _OF_DECLARE_STUB(table, name, compat, fn, fn_type) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 88bc943405..45598dbec2 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -51,8 +51,8 @@ void __iomem *of_io_request_and_map(struct device_node *device, * the address space flags too. The PCI version uses a BAR number * instead of an absolute index */ -extern const __be32 *of_get_address(struct device_node *dev, int index, - u64 *size, unsigned int *flags); +extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, + u64 *size, unsigned int *flags); extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); @@ -61,6 +61,11 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range_parser *parser, struct of_pci_range *range); +extern int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r); +extern int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res); extern bool of_dma_is_coherent(struct device_node *np); #else /* CONFIG_OF_ADDRESS */ static inline void __iomem *of_io_request_and_map(struct device_node *device, @@ -75,8 +80,8 @@ static inline u64 of_translate_address(struct device_node *np, return OF_BAD_ADDR; } -static inline const __be32 *of_get_address(struct device_node *dev, int index, - u64 *size, unsigned int *flags) +static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, + u64 *size, unsigned int *flags) { return NULL; } @@ -100,6 +105,19 @@ static inline struct of_pci_range *of_pci_range_parser_one( return NULL; } +static inline int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + return -ENOSYS; +} + +static inline int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res) +{ + return -ENOSYS; +} + static inline bool of_dma_is_coherent(struct device_node *np) { return false; @@ 
-124,32 +142,16 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) #endif #define of_range_parser_init of_pci_range_parser_init -#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) -extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, - u64 *size, unsigned int *flags); -extern int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r); -extern int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res); -#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ -static inline int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r) +static inline const __be32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags) { - return -ENOSYS; + return __of_get_address(dev, index, -1, size, flags); } -static inline const __be32 *of_get_pci_address(struct device_node *dev, - int bar_no, u64 *size, unsigned int *flags) +static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, + u64 *size, unsigned int *flags) { - return NULL; + return __of_get_address(dev, -1, bar_no, size, flags); } -static inline int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res) -{ - return -ENOSYS; -} -#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 16f4b3e87f..55c1eb300a 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -2,29 +2,18 @@ #ifndef __OF_IOMMU_H #define __OF_IOMMU_H -#include -#include -#include +struct device; +struct device_node; +struct iommu_ops; #ifdef CONFIG_OF_IOMMU -extern int of_get_dma_window(struct device_node *dn, const char *prefix, - int index, unsigned long *busno, dma_addr_t *addr, - size_t *size); - extern const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np, const u32 *id); #else -static inline int of_get_dma_window(struct device_node *dn, const char *prefix, - int index, unsigned long *busno, dma_addr_t *addr, - size_t *size) -{ - return -EINVAL; -} - static inline const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np, const u32 *id) diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 2b05e7f7c2..da633d34ab 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -72,6 +72,13 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node * return mdiobus_register(mdio); } +static inline int devm_of_mdiobus_register(struct device *dev, + struct mii_bus *mdio, + struct device_node *np) +{ + return devm_mdiobus_register(dev, mdio); +} + static inline struct mdio_device *of_mdio_find_device(struct device_node *np) { return NULL; diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 8216a41562..4de2a24cad 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -27,11 +27,11 @@ struct reserved_mem_ops { typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); +#ifdef CONFIG_OF_RESERVED_MEM + #define RESERVEDMEM_OF_DECLARE(name, compat, init) \ _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) -#ifdef CONFIG_OF_RESERVED_MEM - int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx); int of_reserved_mem_device_init_by_name(struct device *dev, @@ -39,11 +39,12 @@ int 
of_reserved_mem_device_init_by_name(struct device *dev, const char *name); void of_reserved_mem_device_release(struct device *dev); -void fdt_init_reserved_mem(void); -void fdt_reserved_mem_save_node(unsigned long node, const char *uname, - phys_addr_t base, phys_addr_t size); struct reserved_mem *of_reserved_mem_lookup(struct device_node *np); #else + +#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ + _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn) + static inline int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx) { @@ -59,9 +60,6 @@ static inline int of_reserved_mem_device_init_by_name(struct device *dev, static inline void of_reserved_mem_device_release(struct device *pdev) { } -static inline void fdt_init_reserved_mem(void) { } -static inline void fdt_reserved_mem_save_node(unsigned long node, - const char *uname, phys_addr_t base, phys_addr_t size) { } static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np) { return NULL; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 461b7aa587..3d8db1f6a5 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -54,6 +54,10 @@ enum OID { OID_md4, /* 1.2.840.113549.2.4 */ OID_md5, /* 1.2.840.113549.2.5 */ + OID_mskrb5, /* 1.2.840.48018.1.2.2 */ + OID_krb5, /* 1.2.840.113554.1.2.2 */ + OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ + /* Microsoft Authenticode & Software Publishing */ OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */ @@ -62,6 +66,10 @@ enum OID { OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ + OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */ + + OID_spnego, /* 1.3.6.1.5.5.2 */ + OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h new file mode 100644 index 0000000000..861e606b82 --- /dev/null +++ b/include/linux/once_lite.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ONCE_LITE_H +#define _LINUX_ONCE_LITE_H + +#include + +/* Call a function once. Similar to DO_ONCE(), but does not use jump label + * patching via static keys. + */ +#define DO_ONCE_LITE(func, ...) \ + DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__) +#define DO_ONCE_LITE_IF(condition, func, ...) 
\ + ({ \ + static bool __section(".data.once") __already_done; \ + bool __ret_do_once = !!(condition); \ + \ + if (unlikely(__ret_do_once && !__already_done)) { \ + __already_done = true; \ + func(__VA_ARGS__); \ + } \ + unlikely(__ret_do_once); \ + }) + +#endif /* _LINUX_ONCE_LITE_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 04a34c08e0..5922031ffa 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -137,6 +137,9 @@ enum pageflags { #endif #ifdef CONFIG_64BIT PG_arch_2, +#endif +#ifdef CONFIG_KASAN_HW_TAGS + PG_skip_kasan_poison, #endif __NR_PAGEFLAGS, @@ -177,17 +180,17 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H -struct page; /* forward declaration */ - -static inline struct page *compound_head(struct page *page) +static inline unsigned long _compound_head(const struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) - return (struct page *) (head - 1); - return page; + return head - 1; + return (unsigned long)page; } +#define compound_head(page) ((typeof(page))_compound_head(page)) + static __always_inline int PageTail(struct page *page) { return READ_ONCE(page->compound_head) & 1; @@ -443,6 +446,12 @@ TESTCLEARFLAG(Young, young, PF_ANY) PAGEFLAG(Idle, idle, PF_ANY) #endif +#ifdef CONFIG_KASAN_HW_TAGS +PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD) +#else +PAGEFLAG_FALSE(SkipKASanPoison) +#endif + /* * PageReported() is used to track reported free pages within the Buddy * allocator. We can use the non-atomic version of the test and set @@ -694,6 +703,18 @@ PAGEFLAG_FALSE(DoubleMap) TESTSCFLAG_FALSE(DoubleMap) #endif +/* + * Check if a page is currently marked HWPoisoned. Note that this check is + * best effort only and inherently racy: there is no way to synchronize with + * failing hardware. + */ +static inline bool is_page_hwpoison(struct page *page) +{ + if (PageHWPoison(page)) + return true; + return PageHuge(page) && PageHWPoison(compound_head(page)); +} + /* * For pages that are never mapped to userspace (and aren't PageSlab), * page_type may be used. Because it is initialised to -1, we invert the @@ -757,9 +778,19 @@ PAGE_TYPE_OPS(Buddy, buddy) * relies on this feature is aware that re-onlining the memory block will * require to re-set the pages PageOffline() and not giving them to the * buddy via online_page_callback_t. + * + * There are drivers that mark a page PageOffline() and expect there won't be + * any further access to page content. PFN walkers that read content of random + * pages should check PageOffline() and synchronize with such drivers using + * page_offline_freeze()/page_offline_thaw(). */ PAGE_TYPE_OPS(Offline, offline) +extern void page_offline_freeze(void); +extern void page_offline_thaw(void); +extern void page_offline_begin(void); +extern void page_offline_end(void); + /* * Marks pages in use as page tables. 
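Returning to PageOffline() above, a sketch of the synchronization contract for PFN walkers; start and end bound the walk, and walker_read_page() is a hypothetical consumer (pfn_to_online_page() is the usual lookup helper):

	unsigned long pfn;

	page_offline_freeze();	/* no page may change PageOffline() state now */
	for (pfn = start; pfn < end; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page || PageOffline(page))
			continue;	/* content must not be read */
		if (is_page_hwpoison(page))
			continue;	/* best-effort check, as noted above */
		walker_read_page(page);
	}
	page_offline_thaw();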
*/ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 3468794f83..719bfe5108 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page, extern void __split_page_owner(struct page *page, unsigned int nr); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); -extern void __dump_page_owner(struct page *page); +extern void __dump_page_owner(const struct page *page); extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone); @@ -46,7 +46,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason) if (static_branch_unlikely(&page_owner_inited)) __set_page_owner_migrate_reason(page, reason); } -static inline void dump_page_owner(struct page *page) +static inline void dump_page_owner(const struct page *page) { if (static_branch_unlikely(&page_owner_inited)) __dump_page_owner(page); @@ -69,7 +69,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage) static inline void set_page_owner_migrate_reason(struct page *page, int reason) { } -static inline void dump_page_owner(struct page *page) +static inline void dump_page_owner(const struct page *page) { } #endif /* CONFIG_PAGE_OWNER */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index f3318f34fc..7ad46f45df 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -62,12 +62,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v) #endif -static inline int page_ref_count(struct page *page) +static inline int page_ref_count(const struct page *page) { return atomic_read(&page->_refcount); } -static inline int page_count(struct page *page) +static inline int page_count(const struct page *page) { return atomic_read(&compound_head(page)->_refcount); } diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h index 3b99e0ec24..fe648dfa3a 100644 --- a/include/linux/page_reporting.h +++ b/include/linux/page_reporting.h @@ -18,6 +18,9 @@ struct page_reporting_dev_info { /* Current state of page reporting */ atomic_t state; + + /* Minimal order of page reporting */ + unsigned int order; }; /* Tear-down and bring-up for page reporting devices */ diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index fff52ad370..973fd731a5 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -54,7 +54,7 @@ extern unsigned int pageblock_order; /* Forward declaration */ struct page; -unsigned long get_pfnblock_flags_mask(struct page *page, +unsigned long get_pfnblock_flags_mask(const struct page *page, unsigned long pfn, unsigned long mask); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0f1b34dbf3..ed02aa5222 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -702,6 +702,10 @@ int wait_on_page_writeback_killable(struct page *page); extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); +void __set_page_dirty(struct page *, struct address_space *, int warn); +int __set_page_dirty_nobuffers(struct page *page); +int __set_page_dirty_no_writeback(struct page *page); + void page_endio(struct page *page, bool is_write, int err); /** diff --git a/include/linux/panic.h b/include/linux/panic.h new file mode 100644 index 0000000000..f5844908a0 --- /dev/null +++ b/include/linux/panic.h @@ 
-0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PANIC_H +#define _LINUX_PANIC_H + +#include +#include + +struct pt_regs; + +extern long (*panic_blink)(int state); +__printf(1, 2) +void panic(const char *fmt, ...) __noreturn __cold; +void nmi_panic(struct pt_regs *regs, const char *msg); +extern void oops_enter(void); +extern void oops_exit(void); +extern bool oops_may_print(void); + +#ifdef CONFIG_SMP +extern unsigned int sysctl_oops_all_cpu_backtrace; +#else +#define sysctl_oops_all_cpu_backtrace 0 +#endif /* CONFIG_SMP */ + +extern int panic_timeout; +extern unsigned long panic_print; +extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; +extern int panic_on_warn; + +extern unsigned long panic_on_taint; +extern bool panic_on_taint_nousertaint; + +extern int sysctl_panic_on_rcu_stall; +extern int sysctl_max_rcu_stall_to_panic; +extern int sysctl_panic_on_stackoverflow; + +extern bool crash_kexec_post_notifiers; + +/* + * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It + * holds a CPU number which is executing panic() currently. A value of + * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). + */ +extern atomic_t panic_cpu; +#define PANIC_CPU_INVALID -1 + +/* + * Only to be used by arch init code. If the user over-wrote the default + * CONFIG_PANIC_TIMEOUT, honor it. + */ +static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) +{ + if (panic_timeout == arch_default_timeout) + panic_timeout = timeout; +} + +/* This cannot be an enum because some may be used in assembly source. */ +#define TAINT_PROPRIETARY_MODULE 0 +#define TAINT_FORCED_MODULE 1 +#define TAINT_CPU_OUT_OF_SPEC 2 +#define TAINT_FORCED_RMMOD 3 +#define TAINT_MACHINE_CHECK 4 +#define TAINT_BAD_PAGE 5 +#define TAINT_USER 6 +#define TAINT_DIE 7 +#define TAINT_OVERRIDDEN_ACPI_TABLE 8 +#define TAINT_WARN 9 +#define TAINT_CRAP 10 +#define TAINT_FIRMWARE_WORKAROUND 11 +#define TAINT_OOT_MODULE 12 +#define TAINT_UNSIGNED_MODULE 13 +#define TAINT_SOFTLOCKUP 14 +#define TAINT_LIVEPATCH 15 +#define TAINT_AUX 16 +#define TAINT_RANDSTRUCT 17 +#define TAINT_FLAGS_COUNT 18 +#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) + +struct taint_flag { + char c_true; /* character printed when tainted */ + char c_false; /* character printed when not tainted */ + bool module; /* also show as a per-module taint flag */ +}; + +extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; + +enum lockdep_ok { + LOCKDEP_STILL_OK, + LOCKDEP_NOW_UNRELIABLE, +}; + +extern const char *print_tainted(void); +extern void add_taint(unsigned flag, enum lockdep_ok); +extern int test_taint(unsigned flag); +extern unsigned long get_taint(void); + +#endif /* _LINUX_PANIC_H */ diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h new file mode 100644 index 0000000000..41e32483d7 --- /dev/null +++ b/include/linux/panic_notifier.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PANIC_NOTIFIERS_H +#define _LINUX_PANIC_NOTIFIERS_H + +#include +#include + +extern struct atomic_notifier_head panic_notifier_list; + +extern bool crash_kexec_post_notifiers; + +#endif /* _LINUX_PANIC_NOTIFIERS_H */ diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index fbdadd4d83..adea5a4771 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -55,6 +55,7 @@ struct pci_ecam_ops { struct pci_config_window { struct resource res; struct resource busr; + unsigned int 
bus_shift; void *priv; const struct pci_ecam_ops *ops; union { diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h index 662881335c..3e2140d7e3 100644 --- a/include/linux/pci-ep-cfs.h +++ b/include/linux/pci-ep-cfs.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/** +/* * PCI Endpoint ConfigFS header file * * Copyright (C) 2017 Texas Instruments diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index b82c9b100e..50a649d33e 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/** +/* * PCI Endpoint *Controller* (EPC) header file * * Copyright (C) 2017 Texas Instruments @@ -58,6 +58,7 @@ pci_epc_interface_string(enum pci_epc_interface_type type) * @map_msi_irq: ops to map physical address to MSI address and return MSI data * @start: ops to start the PCI link * @stop: ops to stop the PCI link + * @get_features: ops to get the features supported by the EPC * @owner: the module owner containing the ops */ struct pci_epc_ops { @@ -150,6 +151,8 @@ struct pci_epc { /** * struct pci_epc_features - features supported by a EPC device per function * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up + * @core_init_notifier: indicate cores that can notify about their availability + * for initialization * @msi_capable: indicate if the endpoint function has MSI capability * @msix_capable: indicate if the endpoint function has MSI-X capability * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 6833e2160e..2debc27ba9 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/** +/* * PCI Endpoint *Function* (EPF) header file * * Copyright (C) 2017 Texas Instruments @@ -102,6 +102,8 @@ struct pci_epf_driver { * @phys_addr: physical address that should be mapped to the BAR * @addr: virtual address corresponding to the @phys_addr * @size: the size of the address space present in BAR + * @barno: BAR number + * @flags: flags that are set for the BAR */ struct pci_epf_bar { dma_addr_t phys_addr; @@ -118,6 +120,7 @@ struct pci_epf_bar { * @header: represents standard configuration header * @bar: represents the BAR of EPF device * @msi_interrupts: number of MSI interrupts required by this function + * @msix_interrupts: number of MSI-X interrupts required by this function * @func_no: unique function number within this endpoint device * @epc: the EPC device to which this EPF device is bound * @driver: the EPF driver to which this EPF device is bound diff --git a/include/linux/pci.h b/include/linux/pci.h index 2430650422..540b377ca8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -497,7 +497,7 @@ struct pci_dev { u16 pasid_features; #endif #ifdef CONFIG_PCI_P2PDMA - struct pci_p2pdma *p2pdma; + struct pci_p2pdma __rcu *p2pdma; #endif u16 acs_cap; /* ACS Capability offset */ phys_addr_t rom; /* Physical address if not from BAR */ @@ -862,6 +862,8 @@ struct module; * MSI-X vectors available for distribution to the VFs. * @err_handler: See Documentation/PCI/pci-error-recovery.rst * @groups: Sysfs attribute groups. + * @dev_groups: Attributes attached to the device that will be + * created once it is bound to the driver. * @driver: Driver model structure. * @dynids: List of dynamically added device IDs. 
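A sketch of the new @dev_groups field in use; the driver, its ID table, its probe routine and the attribute are hypothetical, while DEVICE_ATTR_RO(), ATTRIBUTE_GROUPS() and sysfs_emit() are the stock helpers:

	static ssize_t lanes_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 4);
	}
	static DEVICE_ATTR_RO(lanes);

	static struct attribute *foo_dev_attrs[] = {
		&dev_attr_lanes.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo_dev);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_ids,
		.probe		= foo_probe,
		.dev_groups	= foo_dev_groups,	/* created at bind, removed at unbind */
	};

With @dev_groups the driver no longer has to create and remove these attributes by hand in its probe and remove callbacks.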
*/ @@ -879,6 +881,7 @@ struct pci_driver { u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf); const struct pci_error_handlers *err_handler; const struct attribute_group **groups; + const struct attribute_group **dev_groups; struct device_driver driver; struct pci_dynids dynids; }; @@ -1621,6 +1624,9 @@ void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); +int pci_dev_trylock(struct pci_dev *dev); +void pci_dev_unlock(struct pci_dev *dev); + /* * PCI domain support. Sometimes called PCI segment (eg by ACPI), * a PCI domain is defined to be a set of PCI buses which share @@ -1772,6 +1778,10 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } +static inline int pci_register_io_range(struct fwnode_handle *fwnode, + phys_addr_t addr, resource_size_t size) +{ return -EINVAL; } + static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index b482e42d71..2dac431d94 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -50,6 +50,8 @@ struct hotplug_slot_ops { /** * struct hotplug_slot - used to register a physical slot with the hotplug pci core * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot + * @slot_list: internal list used to track hotplug PCI slots + * @pci_slot: represents a physical slot * @owner: The module owner of this structure * @mod_name: The module name (KBUILD_MODNAME) of this structure */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4c3fa5293d..4bac1831de 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -555,6 +555,7 @@ #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 +#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -631,6 +632,8 @@ #define PCI_DEVICE_ID_DELL_RAC4 0x0012 #define PCI_DEVICE_ID_DELL_PERC5 0x0015 +#define PCI_SUBVENDOR_ID_DELL 0x1028 + #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 2cb5188a7e..add077a81b 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -10,37 +10,33 @@ #include #include +#define NXP_SJA1105_XPCS_ID 0x00000010 +#define NXP_SJA1110_XPCS_ID 0x00000020 + /* AN mode */ #define DW_AN_C73 1 #define DW_AN_C37_SGMII 2 +#define DW_2500BASEX 3 -struct mdio_xpcs_args { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - struct mii_bus *bus; - int addr; - int an_mode; +struct xpcs_id; + +struct dw_xpcs { + struct mdio_device *mdiodev; + const struct xpcs_id *id; + struct phylink_pcs pcs; }; -struct mdio_xpcs_ops { - int (*validate)(struct mdio_xpcs_args *xpcs, - unsigned long *supported, - struct phylink_link_state *state); - int (*config)(struct mdio_xpcs_args *xpcs, - const struct phylink_link_state *state); - int (*get_state)(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state); - int (*link_up)(struct mdio_xpcs_args *xpcs, int speed, - phy_interface_t interface); - int (*probe)(struct mdio_xpcs_args 
*xpcs, phy_interface_t interface); -}; - -#if IS_ENABLED(CONFIG_PCS_XPCS) -struct mdio_xpcs_ops *mdio_xpcs_get_ops(void); -#else -static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void) -{ - return NULL; -} -#endif +int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); +void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex); +int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, + unsigned int mode); +void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, + struct phylink_link_state *state); +int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, + int enable); +struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, + phy_interface_t interface); +void xpcs_destroy(struct dw_xpcs *xpcs); #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index dff7040f62..af1071535d 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -412,7 +412,7 @@ do { \ * instead. * * If there is no other protection through preempt disable and/or disabling - * interupts then one of these RMW operations can show unexpected behavior + * interrupts then one of these RMW operations can show unexpected behavior * because the execution thread was rescheduled on another processor or an * interrupt occurred and the same percpu variable was modified from the * interrupt context. diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f5a6a2f069..2d510ad750 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1576,6 +1576,12 @@ static struct perf_pmu_events_attr _var = { \ .event_str = _str, \ }; +#define PMU_EVENT_ATTR_ID(_name, _show, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, _show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index a43047b103..d147480cde 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -28,6 +28,24 @@ #define USER_PGTABLES_CEILING 0UL #endif +/* + * This defines the first usable user address. Platforms + * can override its value with custom FIRST_USER_ADDRESS + * defined in their respective . + */ +#ifndef FIRST_USER_ADDRESS +#define FIRST_USER_ADDRESS 0UL +#endif + +/* + * This defines the generic helper for accessing PMD page + * table page. Although platforms can still override this + * via their respective . 
+ */ +#ifndef pmd_pgtable +#define pmd_pgtable(pmd) pmd_page(pmd) +#endif + /* * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] * @@ -88,7 +106,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) #ifndef pmd_offset static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { - return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); + return pud_pgtable(*pud) + pmd_index(address); } #define pmd_offset pmd_offset #endif @@ -96,7 +114,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) #ifndef pud_offset static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { - return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); + return p4d_pgtable(*p4d) + pud_index(address); } #define pud_offset pud_offset #endif @@ -1379,10 +1397,34 @@ static inline int p4d_clear_huge(p4d_t *p4d) } #endif /* !__PAGETABLE_P4D_FOLDED */ +#ifndef __PAGETABLE_PUD_FOLDED int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); -int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); +#else +static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pud_clear_huge(pud_t *pud) +{ + return 0; +} +#endif /* !__PAGETABLE_PUD_FOLDED */ + +#ifndef __PAGETABLE_PMD_FOLDED +int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pmd_clear_huge(pmd_t *pmd); +#else +static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} +#endif /* !__PAGETABLE_PMD_FOLDED */ + int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); int pud_free_pmd_page(pud_t *pud, unsigned long addr); int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); @@ -1592,4 +1634,26 @@ typedef unsigned int pgtbl_mod_mask; #define pte_leaf_size(x) PAGE_SIZE #endif +/* + * Some architectures have MMUs that are configurable or selectable at boot + * time. These lead to variable PTRS_PER_x. For statically allocated arrays it + * helps to have a static maximum value. 
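As a concrete (hypothetical) use of the MAX_PTRS_PER_* constants defined just below, statically sized early page tables no longer depend on the boot-time value of PTRS_PER_PTE:

	static pte_t early_shadow_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;
	static pmd_t early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;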
+ */ + +#ifndef MAX_PTRS_PER_PTE +#define MAX_PTRS_PER_PTE PTRS_PER_PTE +#endif + +#ifndef MAX_PTRS_PER_PMD +#define MAX_PTRS_PER_PMD PTRS_PER_PMD +#endif + +#ifndef MAX_PTRS_PER_PUD +#define MAX_PTRS_PER_PUD PTRS_PER_PUD +#endif + +#ifndef MAX_PTRS_PER_P4D +#define MAX_PTRS_PER_P4D PTRS_PER_P4D +#endif + #endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 852743f07e..3b80dc3ed6 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -93,6 +93,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface + * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay @@ -111,6 +112,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR + * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN * @PHY_INTERFACE_MODE_MAX: Book keeping @@ -126,6 +128,7 @@ typedef enum { PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_REVMII, PHY_INTERFACE_MODE_RMII, + PHY_INTERFACE_MODE_REVRMII, PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_RGMII_ID, PHY_INTERFACE_MODE_RGMII_RXID, @@ -145,6 +148,7 @@ typedef enum { PHY_INTERFACE_MODE_XAUI, /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */ PHY_INTERFACE_MODE_10GBASER, + PHY_INTERFACE_MODE_25GBASER, PHY_INTERFACE_MODE_USXGMII, /* 10GBASE-KR - with Clause 73 AN */ PHY_INTERFACE_MODE_10GKR, @@ -185,6 +189,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "rev-mii"; case PHY_INTERFACE_MODE_RMII: return "rmii"; + case PHY_INTERFACE_MODE_REVRMII: + return "rev-rmii"; case PHY_INTERFACE_MODE_RGMII: return "rgmii"; case PHY_INTERFACE_MODE_RGMII_ID: @@ -219,6 +225,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "xaui"; case PHY_INTERFACE_MODE_10GBASER: return "10gbase-r"; + case PHY_INTERFACE_MODE_25GBASER: + return "25gbase-r"; case PHY_INTERFACE_MODE_USXGMII: return "usxgmii"; case PHY_INTERFACE_MODE_10GKR: @@ -1373,10 +1381,42 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); #if IS_ENABLED(CONFIG_PHYLIB) +int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id); +struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode); +struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode); +struct phy_device *device_phy_find_device(struct device *dev); +struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); void phy_device_free(struct phy_device *phydev); #else +static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id) +{ + return 0; +} +static inline +struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline +struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode) +{ + return NULL; +} + +static inline struct
phy_device *device_phy_find_device(struct device *dev) +{ + return NULL; +} + +static inline +struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 0ed434d021..f3286f4cd3 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -125,7 +125,7 @@ struct phy_ops { /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY - * @max_link_rate: Maximum link rate supported by PHY (in Mbps) + * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer) * @mode: PHY mode */ struct phy_attrs { diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h index 71d9569354..3a35e74cdc 100644 --- a/include/linux/phy/tegra/xusb.h +++ b/include/linux/phy/tegra/xusb.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. */ #ifndef PHY_TEGRA_XUSB_H @@ -8,6 +8,7 @@ struct tegra_xusb_padctl; struct device; +enum usb_device_speed; struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev); void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl); @@ -23,4 +24,11 @@ int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl, int tegra_phy_xusb_utmi_port_reset(struct phy *phy); int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, unsigned int port); +int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy, + enum usb_device_speed speed); +int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy); +int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy); +int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy); +bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy); + #endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index fd2acfd9b5..afb3ded0b6 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -441,6 +441,9 @@ void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); +int phylink_fwnode_phy_connect(struct phylink *pl, + struct fwnode_handle *fwnode, + u32 flags); void phylink_disconnect_phy(struct phylink *); void phylink_mac_change(struct phylink *, bool up); diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index e18ab3d590..eee0e39485 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -81,28 +81,28 @@ struct pinctrl_map; * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM + * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a + * value on the line. Use argument 1 to indicate high level, argument 0 to + * indicate low level. (Please see Documentation/driver-api/pin-control.rst, + * section "GPIO mode pitfalls" for a discussion around this parameter.) 
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode * without driving a value there. For most platforms this reduces to * enable the output buffers and then let the pin controller current * configuration (eg. the currently selected mux function) drive values on * the line. Use argument 1 to enable output mode, argument 0 to disable * it. - * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a - * value on the line. Use argument 1 to indicate high level, argument 0 to - * indicate low level. (Please see Documentation/driver-api/pinctl.rst, - * section "GPIO mode pitfalls" for a discussion around this parameter.) * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. - * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. - * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to - * this parameter (on a custom format) tells the driver which alternative - * slew rate to use. * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs) * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. + * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. + * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to + * this parameter (on a custom format) tells the driver which alternative + * slew rate to use. * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. @@ -127,13 +127,13 @@ enum pin_config_param { PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_MODE_LOW_POWER, PIN_CONFIG_MODE_PWM, - PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_POWER_SOURCE, + PIN_CONFIG_SKEW_DELAY, PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, - PIN_CONFIG_SKEW_DELAY, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h index 2955ba9760..6beb26b715 100644 --- a/include/linux/pkeys.h +++ b/include/linux/pkeys.h @@ -44,10 +44,6 @@ static inline bool arch_pkeys_enabled(void) return false; } -static inline void copy_init_pkru_to_fpregs(void) -{ -} - #endif /* ! CONFIG_ARCH_HAS_PKEYS */ #endif /* _LINUX_PKEYS_H */ diff --git a/include/linux/platform_data/pata_ixp4xx_cf.h b/include/linux/platform_data/pata_ixp4xx_cf.h new file mode 100644 index 0000000000..601ba97fef --- /dev/null +++ b/include/linux/platform_data/pata_ixp4xx_cf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_PATA_IXP4XX_H +#define __PLATFORM_DATA_PATA_IXP4XX_H + +#include + +/* + * This structure provides a means for the board setup code + * to give information to the pata_ixp4xx driver. It is + * passed as platform_data.
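A board-setup sketch of the usage this comment describes, filling the structure defined just below; the chip-select timing values are illustrative only, while "pata_ixp4xx_cf" is the driver's platform device name:

	static struct ixp4xx_pata_data board_pata_data = {
		.cs0_bits	= 0xbfff0043,
		.cs1_bits	= 0xbfff0043,
	};

	static struct platform_device board_pata = {
		.name			= "pata_ixp4xx_cf",
		.id			= 0,
		.dev.platform_data	= &board_pata_data,
	};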
+ */ +struct ixp4xx_pata_data { + volatile u32 *cs0_cfg; + volatile u32 *cs1_cfg; + unsigned long cs0_bits; + unsigned long cs1_bits; + void __iomem *cs0; + void __iomem *cs1; +}; + +#endif diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index e40b28ca89..897051e51b 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -13,8 +13,9 @@ /** * struct st_sensors_platform_data - Platform data for the ST sensors * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). - * Available only for accelerometer and pressure sensors. + * Available only for accelerometer, magnetometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). + * Magnetometer DRDY is supported only on LSM9DS0. * @open_drain: set the interrupt line to be open drain if possible. * @spi_3wire: enable spi-3wire mode. * @pullups: enable/disable i2c controller pullup resistors. diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index cd81e06086..ed42ea9f60 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -66,9 +66,6 @@ extern void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index); extern void __iomem * -devm_platform_ioremap_resource_wc(struct platform_device *pdev, - unsigned int index); -extern void __iomem * devm_platform_ioremap_resource_byname(struct platform_device *pdev, const char *name); extern int platform_get_irq(struct platform_device *, unsigned int); diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index a6329003ae..e5cbb6841f 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -2,7 +2,7 @@ /* * Platform profile sysfs interface * - * See Documentation/ABI/testing/sysfs-platform_profile.rst for more + * See Documentation/userspace-api/sysfs-platform_profile.rst for more * information. */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index dfcfbcecc3..21a0577305 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -198,6 +198,7 @@ struct generic_pm_domain_data { struct notifier_block *power_nb; int cpu; unsigned int performance_state; + unsigned int rpm_pstate; ktime_t next_wakeup; void *data; }; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 6c08a08536..aab8b35e9f 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -380,6 +380,9 @@ static inline int pm_runtime_get(struct device *dev) * The possible return values of this function are the same as for * pm_runtime_resume() and the runtime PM usage counter of @dev remains * incremented in all cases, even if it returns an error code. + * Consider using pm_runtime_resume_and_get() instead of it, especially + * if its return value is checked by the caller, as this is likely to result + * in cleaner code. */ static inline int pm_runtime_get_sync(struct device *dev) { diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h index 12cbbf3059..fa9f08164c 100644 --- a/include/linux/pmbus.h +++ b/include/linux/pmbus.h @@ -43,6 +43,36 @@ */ #define PMBUS_NO_CAPABILITY BIT(2) +/* + * PMBUS_READ_STATUS_AFTER_FAILED_CHECK + * + * Some PMBus chips end up in an undefined state when trying to read an + * unsupported register. For such chips, it is necessary to reset the + * chip pmbus controller to a known state after a failed register check. 
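The pm_runtime_get_sync() comment above recommends the following pattern instead; a sketch where dev is a runtime-PM-enabled device and do_work() is hypothetical:

	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage counter was already dropped for us */

	do_work(dev);
	pm_runtime_put(dev);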
+ * This can be done by reading a known register. By setting this flag the + * driver will try to read the STATUS register after each failed + * register check. This read may fail, but it will put the chip in a + * known state. + */ +#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3) + +/* + * PMBUS_NO_WRITE_PROTECT + * + * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT + * register. For such chips, this flag should be set so that the PMBus core + * driver doesn't use the WRITE_PROTECT command to determine its behavior. + */ +#define PMBUS_NO_WRITE_PROTECT BIT(4) + +/* + * PMBUS_USE_COEFFICIENTS_CMD + * + * When this flag is set the PMBus core driver will use the COEFFICIENTS + * register to initialize the coefficients for the direct mode format. + */ +#define PMBUS_USE_COEFFICIENTS_CMD BIT(5) + struct pmbus_platform_data { u32 flags; /* Device specific flags */ diff --git a/include/linux/poison.h b/include/linux/poison.h index aff1c9250c..d62ef5a6b4 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -78,4 +78,7 @@ /********** security/ **********/ #define KEY_DESTROY 0xbd +/********** net/core/page_pool.c **********/ +#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA) + #endif diff --git a/include/linux/prandom.h b/include/linux/prandom.h index bbf4b4ad61..056d31317e 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -111,7 +111,7 @@ static inline u32 __seed(u32 x, u32 m) */ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) { - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL; state->s1 = __seed(i, 2U); state->s2 = __seed(i, 8U); diff --git a/include/linux/printk.h b/include/linux/printk.h index fe7eb23516..e834d78f04 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -8,6 +8,7 @@ #include #include #include +#include extern const char linux_banner[]; extern const char linux_proc_banner[]; @@ -206,6 +207,7 @@ void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); +extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); @@ -269,6 +271,10 @@ static inline void show_regs_print_info(const char *log_lvl) { } +static inline void dump_stack_lvl(const char *log_lvl) +{ +} + static inline void dump_stack(void) { } @@ -282,6 +288,47 @@ static inline void printk_safe_flush_on_panic(void) } #endif +#ifdef CONFIG_SMP +extern int __printk_cpu_trylock(void); +extern void __printk_wait_on_cpu_lock(void); +extern void __printk_cpu_unlock(void); + +/** + * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning + * lock and disable interrupts. + * @flags: Stack-allocated storage for saving local interrupt state, + * to be passed to printk_cpu_unlock_irqrestore(). + * + * If the lock is owned by another CPU, spin until it becomes available. + * Interrupts are restored while spinning. + */ +#define printk_cpu_lock_irqsave(flags) \ + for (;;) { \ + local_irq_save(flags); \ + if (__printk_cpu_trylock()) \ + break; \ + local_irq_restore(flags); \ + __printk_wait_on_cpu_lock(); \ + } + +/** + * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning + * lock and restore interrupts. 
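A usage sketch for the macro pair defined above, keeping a multi-line report from interleaving with output from other CPUs (dump_stack() follows the same pattern):

	unsigned long flags;

	printk_cpu_lock_irqsave(flags);
	pr_emerg("CPU%d: report start\n", smp_processor_id());
	/* ... further printk() calls that must stay contiguous ... */
	printk_cpu_unlock_irqrestore(flags);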
+ * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave(). + */ +#define printk_cpu_unlock_irqrestore(flags) \ + do { \ + __printk_cpu_unlock(); \ + local_irq_restore(flags); \ + } while (0) \ + +#else + +#define printk_cpu_lock_irqsave(flags) ((void)flags) +#define printk_cpu_unlock_irqrestore(flags) ((void)flags) + +#endif /* CONFIG_SMP */ + extern int kptr_restrict; /** @@ -436,27 +483,9 @@ extern int kptr_restrict; #ifdef CONFIG_PRINTK #define printk_once(fmt, ...) \ -({ \ - static bool __section(".data.once") __print_once; \ - bool __ret_print_once = !__print_once; \ - \ - if (!__print_once) { \ - __print_once = true; \ - printk(fmt, ##__VA_ARGS__); \ - } \ - unlikely(__ret_print_once); \ -}) + DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__) #define printk_deferred_once(fmt, ...) \ -({ \ - static bool __section(".data.once") __print_once; \ - bool __ret_print_once = !__print_once; \ - \ - if (!__print_once) { \ - __print_once = true; \ - printk_deferred(fmt, ##__VA_ARGS__); \ - } \ - unlikely(__ret_print_once); \ -}) + DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__) #else #define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) diff --git a/include/linux/prmt.h b/include/linux/prmt.h new file mode 100644 index 0000000000..24da8364b9 --- /dev/null +++ b/include/linux/prmt.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifdef CONFIG_ACPI_PRMT +void init_prmt(void); +#else +static inline void init_prmt(void) { } +#endif diff --git a/include/linux/property.h b/include/linux/property.h index 0d876316e6..073e680c35 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -119,7 +119,7 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev, struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); -int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); +int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); unsigned int device_get_child_node_count(struct device *dev); diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h index 99564f93d7..924ca07aaf 100644 --- a/include/linux/pstore_blk.h +++ b/include/linux/pstore_blk.h @@ -10,36 +10,15 @@ /** * struct pstore_device_info - back-end pstore/blk driver structure. * - * @total_size: The total size in bytes pstore/blk can use. It must be greater - * than 4096 and be multiple of 4096. * @flags: Refer to macro starting with PSTORE_FLAGS defined in * linux/pstore.h. It means what front-ends this device support. * Zero means all backends for compatible. - * @read: The general read operation. Both of the function parameters - * @size and @offset are relative value to bock device (not the - * whole disk). - * On success, the number of bytes should be returned, others - * means error. - * @write: The same as @read, but the following error number: - * -EBUSY means try to write again later. - * -ENOMSG means to try next zone. - * @erase: The general erase operation for device with special removing - * job. Both of the function parameters @size and @offset are - * relative value to storage. - * Return 0 on success and others on failure. - * @panic_write:The write operation only used for panic case. It's optional - * if you do not care panic log. The parameters are relative - * value to storage. - * On success, the number of bytes should be returned, others - * excluding -ENOMSG mean error. -ENOMSG means to try next zone. + * @zone: The struct pstore_zone_info details. 
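The printk_once() conversion above is the template for any other one-shot call site; a sketch with a hypothetical handler and limit:

	static void report_bad_id(int id)
	{
		pr_warn("unexpected id %d, reported only once\n", id);
	}

	/* Fires at most once over the kernel's lifetime, and only if the
	 * condition is true when evaluated. */
	DO_ONCE_LITE_IF(id > MAX_VALID_ID, report_bad_id, id);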
+ * */ struct pstore_device_info { - unsigned long total_size; unsigned int flags; - pstore_zone_read_op read; - pstore_zone_write_op write; - pstore_zone_erase_op erase; - pstore_zone_write_op panic_write; + struct pstore_zone_info zone; }; int register_pstore_device(struct pstore_device_info *dev); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 51d7f1b8b3..71fac92377 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -11,7 +11,10 @@ #include #include #include +#include +#include +#define PTP_CLOCK_NAME_LEN 32 /** * struct ptp_clock_request - request PTP clock event * @@ -134,7 +137,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[16]; + char name[PTP_CLOCK_NAME_LEN]; s32 max_adj; int n_alarm; int n_ext_ts; @@ -186,6 +189,32 @@ struct ptp_clock_event { }; }; +/** + * scaled_ppm_to_ppb() - convert scaled ppm to ppb + * + * @ppm: Parts per million, but with a 16 bit binary fractional field + */ +static inline long scaled_ppm_to_ppb(long ppm) +{ + /* + * The 'freq' field in the 'struct timex' is in parts per + * million, but with a 16 bit binary fractional field. + * + * We want to calculate + * + * ppb = scaled_ppm * 1000 / 2^16 + * + * which simplifies to + * + * ppb = scaled_ppm * 125 / 2^13 + */ + s64 ppb = 1 + ppm; + + ppb *= 125; + ppb >>= 13; + return (long)ppb; +} + #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) /** @@ -229,14 +258,6 @@ extern void ptp_clock_event(struct ptp_clock *ptp, extern int ptp_clock_index(struct ptp_clock *ptp); -/** - * scaled_ppm_to_ppb() - convert scaled ppm to ppb - * - * @ppm: Parts per million, but with a 16 bit binary fractional field - */ - -extern long scaled_ppm_to_ppb(long ppm); - /** * ptp_find_pin() - obtain the pin index of a given auxiliary function * @@ -286,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); */ void ptp_cancel_worker_sync(struct ptp_clock *ptp); +/** + * ptp_get_vclocks_index() - get all vclocks index on pclock, and + * caller is responsible to free memory + * of vclock_index + * + * @pclock_index: phc index of ptp pclock. + * @vclock_index: pointer to pointer of vclock index. + * + * return number of vclocks. + */ +int ptp_get_vclocks_index(int pclock_index, int **vclock_index); + +/** + * ptp_convert_timestamp() - convert timestamp to a ptp vclock time + * + * @hwtstamps: skb_shared_hwtstamps structure pointer + * @vclock_index: phc index of ptp vclock. + */ +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index); + #else static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) @@ -305,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, { return -EOPNOTSUPP; } static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } +static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ return 0; } +static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 5bb90af499..a0b7e43049 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -54,12 +54,17 @@ enum { * @duty_cycle: PWM duty cycle (in nanoseconds) * @polarity: PWM polarity * @enabled: PWM enabled status + * @usage_power: If set, the PWM driver is only required to maintain the power + * output but has more freedom regarding signal form. 
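A quick check of the arithmetic in scaled_ppm_to_ppb() above: the low 16 bits of scaled ppm are a binary fraction, so 1 ppm is represented as 65536 scaled ppm, and the helper maps it to 1000 ppb:

	long ppb = scaled_ppm_to_ppb(65536);	/* (1 + 65536) * 125 = 8192125; 8192125 >> 13 = 1000 */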
+ * If supported, the signal can be optimized, for example to + * improve EMI by phase shifting individual channels. */ struct pwm_state { u64 period; u64 duty_cycle; enum pwm_polarity polarity; bool enabled; + bool usage_power; }; /** @@ -188,6 +193,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm, state->period = args.period; state->polarity = args.polarity; state->duty_cycle = 0; + state->usage_power = false; } /** @@ -399,6 +405,9 @@ void *pwm_get_chip_data(struct pwm_device *pwm); int pwmchip_add(struct pwm_chip *chip); int pwmchip_remove(struct pwm_chip *chip); + +int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip); + struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label); @@ -417,7 +426,6 @@ struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, struct pwm_device *devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, const char *con_id); -void devm_pwm_put(struct device *dev, struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { @@ -524,10 +532,6 @@ devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, { return ERR_PTR(-ENODEV); } - -static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) -{ -} #endif static inline void pwm_apply_args(struct pwm_device *pwm) @@ -558,6 +562,7 @@ static inline void pwm_apply_args(struct pwm_device *pwm) state.enabled = false; state.polarity = pwm->args.polarity; state.period = pwm->args.period; + state.usage_power = false; pwm_apply_state(pwm, &state); } diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 7f73b26ed2..a3fec2de51 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2003 Russell King, All Rights Reserved. + * Copyright (C) 2003 Russell King, All Rights Reserved. 
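Back to the PWM change above: a consumer opting in to @usage_power would look like this sketch, where pwm came from pwm_get() and the 50% duty cycle is arbitrary:

	struct pwm_state state;

	pwm_init_state(pwm, &state);		/* now also clears usage_power */
	state.duty_cycle = state.period / 2;
	state.usage_power = true;		/* power output matters, exact wave form does not */
	pwm_apply_state(pwm, &state);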
* * This driver supports the following PXA CPU/SSP ports:- * @@ -11,8 +11,8 @@ * PXA3xx SSP1, SSP2, SSP3, SSP4 */ -#ifndef __LINUX_SSP_H -#define __LINUX_SSP_H +#ifndef __LINUX_PXA2XX_SSP_H +#define __LINUX_PXA2XX_SSP_H #include #include @@ -38,7 +38,6 @@ struct device_node; #define SSDR (0x10) /* SSP Data Write/Data Read Register */ #define SSTO (0x28) /* SSP Time Out Register */ -#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */ #define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ #define SSTSA (0x30) /* SSP Tx Timeslot Active */ #define SSRSA (0x34) /* SSP Rx Timeslot Active */ @@ -60,7 +59,7 @@ struct device_node; /* PXA27x, PXA3xx */ #define SSCR0_EDSS BIT(20) /* Extended data size select */ #define SSCR0_NCS BIT(21) /* Network clock select */ -#define SSCR0_RIM BIT(22) /* Receive FIFO overrrun interrupt mask */ +#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */ #define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */ #define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */ #define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ @@ -105,6 +104,9 @@ struct device_node; #define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */ #define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ +/* Intel Quark X1000 */ +#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */ + /* QUARK_X1000 SSCR0 bit definition */ #define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */ #define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ @@ -124,7 +126,7 @@ struct device_node; #define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */ #define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */ -/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ +/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */ #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ @@ -181,6 +183,21 @@ struct device_node; #define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ #define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */ +/* Intel Merrifield SSP */ +#define SFIFOL 0x68 /* FIFO level */ +#define SFIFOTT 0x6c /* FIFO trigger threshold */ + +#define RX_THRESH_MRFLD_DFLT 16 +#define TX_THRESH_MRFLD_DFLT 16 + +#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */ +#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */ + +#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */ +#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */ +#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */ +#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */ + /* LPSS SSP */ #define SSITF 0x44 /* TX FIFO trigger level */ #define SSITF_TxHiThresh(x) (((x) - 1) << 0) @@ -203,8 +220,10 @@ enum pxa_ssp_type { MMP2_SSP, PXA910_SSP, CE4100_SSP, + MRFLD_SSP, QUARK_X1000_SSP, - LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ + /* Keep LPSS types sorted with lpss_platforms[] */ + LPSS_LPT_SSP, LPSS_BYT_SSP, LPSS_BSW_SSP, LPSS_SPT_SSP, @@ -252,6 +271,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg) return __raw_readl(dev->mmio_base + reg); } +static inline void pxa_ssp_enable(struct ssp_device *ssp) +{ + u32 sscr0; + + sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE; + pxa_ssp_write_reg(ssp, SSCR0, sscr0); +} + +static 
inline void pxa_ssp_disable(struct ssp_device *ssp) +{ + u32 sscr0; + + sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE; + pxa_ssp_write_reg(ssp, SSCR0, sscr0); +} + #if IS_ENABLED(CONFIG_PXA_SSP) struct ssp_device *pxa_ssp_request(int port, const char *label); void pxa_ssp_free(struct ssp_device *); @@ -270,4 +305,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n, static inline void pxa_ssp_free(struct ssp_device *ssp) {} #endif -#endif +#endif /* __LINUX_PXA2XX_SSP_H */ diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 977807e1be..0a3807e927 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -702,7 +702,7 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { - PROTOCOLID_ISCSI, + PROTOCOLID_TCP_ULP, PROTOCOLID_FCOE, PROTOCOLID_ROCE, PROTOCOLID_CORE, diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h new file mode 100644 index 0000000000..5a2ab06063 --- /dev/null +++ b/include/linux/qed/nvmetcp_common.h @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright 2021 Marvell. All rights reserved. */ + +#ifndef __NVMETCP_COMMON__ +#define __NVMETCP_COMMON__ + +#include "tcp_common.h" +#include + +#define NVMETCP_SLOW_PATH_LAYER_CODE (6) +#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf) + +/* NVMeTCP firmware function init parameters */ +struct nvmetcp_spe_func_init { + __le16 half_way_close_timeout; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 ll2_rx_queue_id; + u8 flags; +#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 +#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 +#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1 +#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1 +#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F +#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2 + u8 debug_flags; + __le16 reserved1; + u8 params; +#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF +#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0 +#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF +#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4 + u8 reserved2[5]; + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; +}; + +/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. 
*/ +struct nvmetcp_init_ramrod_params { + struct nvmetcp_spe_func_init nvmetcp_init_spe; + struct tcp_init_params tcp_init; +}; + +/* NVMeTCP Ramrod Command IDs */ +enum nvmetcp_ramrod_cmd_id { + NVMETCP_RAMROD_CMD_ID_UNUSED = 0, + NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1, + NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2, + NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3, + NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4, + NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5, + NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6, + MAX_NVMETCP_RAMROD_CMD_ID +}; + +struct nvmetcp_glbl_queue_entry { + struct regpair cq_pbl_addr; + struct regpair reserved; +}; + +/* NVMeTCP conn level EQEs */ +enum nvmetcp_eqe_opcode { + NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */ + NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */ + NVMETCP_EVENT_TYPE_OFFLOAD_CONN,/* Response after option 2 offload Ramrod */ + NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */ + NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */ + NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */ + NVMETCP_EVENT_TYPE_RESERVED0, + NVMETCP_EVENT_TYPE_RESERVED1, + NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */ + NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */ + NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */ + NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */ + NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */ + MAX_NVMETCP_EQE_OPCODE +}; + +struct nvmetcp_conn_offload_section { + struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */ + __le16 cccid_max_range; /* CCCID max value - used for validation */ + __le16 reserved[3]; +}; + +/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */ +struct nvmetcp_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3 +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4 + u8 default_cq; + __le16 reserved0; + __le32 reserved1; + __le32 initial_ack; + + struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */ +}; + +/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. 
*/ +struct nvmetcp_spe_conn_offload { + __le16 reserved; + __le16 conn_id; + __le32 fw_cid; + struct nvmetcp_conn_offload_params nvmetcp; + struct tcp_offload_params_opt2 tcp; +}; + +/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */ +struct nvmetcp_conn_update_ramrod_params { + __le16 reserved0; + __le16 conn_id; + __le32 reserved1; + u8 flags; +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_DATA_SHIFT 3 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7 + u8 reserved3[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 reserved4[5]; +}; + +/* NVMeTCP connection termination request */ +struct nvmetcp_spe_conn_termination { + __le16 reserved0; + __le16 conn_id; + __le32 reserved1; + u8 abortive; + u8 reserved2[7]; + struct regpair reserved3; + struct regpair reserved4; +}; + +struct nvmetcp_dif_flags { + u8 flags; +}; + +enum nvmetcp_wqe_type { + NVMETCP_WQE_TYPE_NORMAL, + NVMETCP_WQE_TYPE_TASK_CLEANUP, + NVMETCP_WQE_TYPE_MIDDLE_PATH, + NVMETCP_WQE_TYPE_IC, + MAX_NVMETCP_WQE_TYPE +}; + +struct nvmetcp_wqe { + __le16 task_id; + u8 flags; +#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */ +#define NVMETCP_WQE_WQE_TYPE_SHIFT 0 +#define NVMETCP_WQE_NUM_SGES_MASK 0xF +#define NVMETCP_WQE_NUM_SGES_SHIFT 3 +#define NVMETCP_WQE_RESPONSE_MASK 0x1 +#define NVMETCP_WQE_RESPONSE_SHIFT 7 + struct nvmetcp_dif_flags prot_flags; + __le32 contlen_cdbsize; +#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF +#define NVMETCP_WQE_CONT_LEN_SHIFT 0 +#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF +#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24 +}; + +struct nvmetcp_host_cccid_itid_entry { + __le16 itid; +}; + +struct nvmetcp_connect_done_results { + __le16 icid; + __le16 conn_id; + struct tcp_ulp_connect_done_params params; +}; + +struct nvmetcp_eqe_data { + __le16 icid; + __le16 conn_id; + __le16 reserved; + u8 error_code; + u8 error_pdu_opcode_reserved; +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0 +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1 +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6 +#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1 +#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7 +}; + +enum nvmetcp_task_type { + NVMETCP_TASK_TYPE_HOST_WRITE, + NVMETCP_TASK_TYPE_HOST_READ, + NVMETCP_TASK_TYPE_INIT_CONN_REQUEST, + NVMETCP_TASK_TYPE_RESERVED0, + NVMETCP_TASK_TYPE_CLEANUP, + NVMETCP_TASK_TYPE_HOST_READ_NO_CQE, + MAX_NVMETCP_TASK_TYPE +}; + +struct nvmetcp_db_data { + u8 params; +#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */ 
+#define NVMETCP_DB_DATA_DEST_SHIFT 0 +#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */ +#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2 +#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */ +#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4 +#define NVMETCP_DB_DATA_RESERVED_MASK 0x1 +#define NVMETCP_DB_DATA_RESERVED_SHIFT 5 +#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */ +#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; /* bit for every DQ counter flags in CM context that DQ can increment */ + __le16 sq_prod; +}; + +struct nvmetcp_fw_nvmf_cqe { + __le32 reserved[4]; +}; + +struct nvmetcp_icresp_mdata { + u8 digest; + u8 cpda; + __le16 pfv; + __le32 maxdata; + __le16 rsvd[4]; +}; + +union nvmetcp_fw_cqe_data { + struct nvmetcp_fw_nvmf_cqe nvme_cqe; + struct nvmetcp_icresp_mdata icresp_mdata; +}; + +struct nvmetcp_fw_cqe { + __le16 conn_id; + u8 cqe_type; + u8 cqe_error_status_bits; +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 + __le16 itid; + u8 task_type; + u8 fw_dbg_field; + u8 caused_conn_err; + u8 reserved0[3]; + __le32 reserved1; + union nvmetcp_fw_cqe_data cqe_data; + struct regpair task_opaque; + __le32 reserved[6]; +}; + +enum nvmetcp_fw_cqes_type { + NVMETCP_FW_CQE_TYPE_NORMAL = 1, + NVMETCP_FW_CQE_TYPE_RESERVED0, + NVMETCP_FW_CQE_TYPE_RESERVED1, + NVMETCP_FW_CQE_TYPE_CLEANUP, + NVMETCP_FW_CQE_TYPE_DUMMY, + MAX_NVMETCP_FW_CQES_TYPE +}; + +struct ystorm_nvmetcp_task_state { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 resrved0; + __le32 buffer_offset; + __le16 cccid; + struct nvmetcp_dif_flags dif_flags; + u8 flags; +#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 +#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3 +}; + +struct ystorm_nvmetcp_task_rxmit_opt { + __le32 reserved[4]; +}; + +struct nvmetcp_task_hdr { + __le32 reg[18]; +}; + +struct nvmetcp_task_hdr_aligned { + struct nvmetcp_task_hdr task_hdr; + __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */ +}; + +struct e5_tdif_task_context { + __le32 reserved[16]; +}; + +struct e5_rdif_task_context { + __le32 reserved[12]; +}; + +struct ystorm_nvmetcp_task_st_ctx { + struct ystorm_nvmetcp_task_state state; + struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt; + struct nvmetcp_task_hdr_aligned pdu_hdr; +}; + +struct mstorm_nvmetcp_task_st_ctx { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; + u8 task_type; + struct nvmetcp_dif_flags dif_flags; + __le16 dif_task_icid; + struct regpair reserved0; + __le32 expected_itt; + __le32 reserved1; +}; + +struct ustorm_nvmetcp_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair reserved0; + __le32 reg1_map; +#define REG1_NUM_SGES_MASK 0xF +#define REG1_NUM_SGES_SHIFT 0 +#define REG1_RESERVED1_MASK 0xFFFFFFF +#define REG1_RESERVED1_SHIFT 4 + u8 
flags2; +#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1 + struct nvmetcp_dif_flags dif_flags; + __le16 reserved3; + __le16 tqe_opaque[2]; + __le32 reserved5; + __le32 nvme_tcp_opaque_lo; + __le32 nvme_tcp_opaque_hi; + u8 task_type; + u8 error_flags; +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3 + u8 flags; +#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 +#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 + u8 cq_rss_number; +}; + +struct e5_ystorm_nvmetcp_task_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state_and_core_id */; + __le16 word0 /* icid */; + u8 flags0; + u8 flags1; + u8 flags2; + u8 flags3; + __le32 TTT; + u8 byte2; + u8 byte3; + u8 byte4; + u8 e4_reserved7; +}; + +struct e5_mstorm_nvmetcp_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; + u8 flags3; + __le32 reg0; + u8 byte2; + u8 byte3; + u8 byte4; + u8 e4_reserved7; +}; + +struct e5_ustorm_nvmetcp_task_ag_ctx { + u8 reserved; + u8 state_and_core_id; + __le16 icid; + u8 flags0; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define 
E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; + u8 flags4; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_SHIFT 3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + u8 byte2; + u8 byte3; + u8 e4_reserved8; + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + __le16 word1; + __le16 next_tid; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +struct e5_nvmetcp_task_context { + struct ystorm_nvmetcp_task_st_ctx ystorm_st_context; + struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct e5_tdif_task_context tdif_context; + struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context; + struct regpair ustorm_ag_padding[2]; + struct mstorm_nvmetcp_task_st_ctx mstorm_st_context; + struct regpair mstorm_st_padding[2]; + struct ustorm_nvmetcp_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct e5_rdif_task_context rdif_context; +}; + +#endif /* __NVMETCP_COMMON__*/ diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 68d17a4fbf..850b989916 100644 --- a/include/linux/qed/qed_if.h +++ 
b/include/linux/qed/qed_if.h @@ -542,6 +542,22 @@ struct qed_iscsi_pf_params { u8 bdq_pbl_num_entries[3]; }; +struct qed_nvmetcp_pf_params { + u64 glbl_q_params_addr; + u16 cq_num_entries; + u16 num_cons; + u16 num_tasks; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 num_queues; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 ll2_ooo_queue_id; + u16 min_rto; +}; + struct qed_rdma_pf_params { /* Supplied to QED during resource allocation (may affect the ILT and * the doorbell BAR). @@ -560,6 +576,7 @@ struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; + struct qed_nvmetcp_pf_params nvmetcp_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; @@ -662,6 +679,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, + QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI, QED_PROTOCOL_FCOE, }; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index ea273ba1c9..ff808d2488 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -18,7 +18,7 @@ enum qed_ll2_conn_type { QED_LL2_TYPE_FCOE, - QED_LL2_TYPE_ISCSI, + QED_LL2_TYPE_TCP_ULP, QED_LL2_TYPE_TEST, QED_LL2_TYPE_OOO, QED_LL2_TYPE_RESERVED2, diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h new file mode 100644 index 0000000000..14671bc19e --- /dev/null +++ b/include/linux/qed/qed_nvmetcp_if.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright 2021 Marvell. All rights reserved. */ + +#ifndef _QED_NVMETCP_IF_H +#define _QED_NVMETCP_IF_H +#include +#include +#include +#include + +#define QED_NVMETCP_MAX_IO_SIZE 0x800000 +#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr)) +#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu)) +#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16)) + +typedef int (*nvmetcp_event_cb_t) (void *context, + u8 fw_event_code, void *fw_handle); + +struct qed_dev_nvmetcp_info { + struct qed_dev_info common; + u8 port_id; /* Physical port */ + u8 num_cqs; +}; + +#define MAX_TID_BLOCKS_NVMETCP (512) +struct qed_nvmetcp_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_NVMETCP]; +}; + +struct qed_nvmetcp_id_params { + u8 mac[ETH_ALEN]; + u32 ip[4]; + u16 port; +}; + +struct qed_nvmetcp_params_offload { + /* FW initializations */ + dma_addr_t sq_pbl_addr; + dma_addr_t nvmetcp_cccid_itid_table_addr; + u16 nvmetcp_cccid_max_range; + u8 default_cq; + + /* Networking and TCP stack initializations */ + struct qed_nvmetcp_id_params src; + struct qed_nvmetcp_id_params dst; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 cwnd; + u16 mss; + u16 vlan_id; + bool timestamp_en; + bool delayed_ack_en; + bool tcp_keep_alive_en; + bool ecn_en; + u8 ip_version; + u8 ka_max_probe_cnt; + u8 ttl; + u8 tos_or_tc; + u8 rcv_wnd_scale; +}; + +struct qed_nvmetcp_params_update { + u32 max_io_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + + /* Placeholder: pfv, cpda, hpda */ + + bool hdr_digest_en; + bool data_digest_en; +}; + +struct qed_nvmetcp_cb_ops { + struct qed_common_cb_ops common; +}; + +struct nvmetcp_sge { + struct regpair sge_addr; /* SGE address */ + __le32 sge_len; /* SGE length */ + __le32 reserved; +}; + +/* IO path HSI function SGL params */ +struct storage_sgl_task_params { + struct nvmetcp_sge *sgl; + struct regpair sgl_phys_addr; + 
u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+/* IO path HSI function FW task context params */
+struct nvmetcp_task_params {
+ void *context; /* Output parameter - set/filled by the HSI function */
+ struct nvmetcp_wqe *sqe;
+ u32 tx_io_size; /* in bytes (Without DIF, if exists) */
+ u32 rx_io_size; /* in bytes (Without DIF, if exists) */
+ u16 conn_icid;
+ u16 itid;
+ struct regpair opq; /* qedn_task_ctx address */
+ u16 host_cccid;
+ u8 cq_rss_number;
+ bool send_write_incapsule;
+};
+
+/**
+ * struct qed_nvmetcp_ops - qed NVMeTCP operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills NVMeTCP specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register nvmetcp operations
+ * @param cdev
+ * @param ops - specified using qed_nvmetcp_cb_ops
+ * @param cookie - driver private
+ * @start: nvmetcp in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * return 0 on success, otherwise error value.
+ * @stop: nvmetcp in FW
+ * @param cdev
+ * return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new nvmetcp connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired nvmetcp connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all tasks in sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @add_src_tcp_port_filter: Add source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @remove_src_tcp_port_filter: Remove source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @add_dst_tcp_port_filter: Add destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @remove_dst_tcp_port_filter: Remove destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev + */ +struct qed_nvmetcp_ops { + const struct qed_common_ops *common; + + const struct qed_ll2_ops *ll2; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_nvmetcp_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_nvmetcp_cb_ops *ops, void *cookie); + + int (*start)(struct qed_dev *cdev, + struct qed_nvmetcp_tid *tasks, + void *event_context, nvmetcp_event_cb_t async_event_cb); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_nvmetcp_params_offload *conn_info); + + int (*update_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_nvmetcp_params_update *conn_info); + + int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); + + int (*clear_sq)(struct qed_dev *cdev, u32 handle); + + int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port); + + void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port); + + int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port); + + void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port); + + void (*clear_all_filters)(struct qed_dev *cdev); + + void (*init_read_io)(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params); + + void (*init_write_io)(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params); + + void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params, + struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr, + struct storage_sgl_task_params *tx_sgl_task_params, + struct storage_sgl_task_params *rx_sgl_task_params); + + void (*init_task_cleanup)(struct nvmetcp_task_params *task_params); +}; + +const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void); +void qed_put_nvmetcp_ops(void); +#endif diff --git a/include/linux/qed/qed_nvmetcp_ip_services_if.h b/include/linux/qed/qed_nvmetcp_ip_services_if.h new file mode 100644 index 0000000000..3604aee537 --- /dev/null +++ b/include/linux/qed/qed_nvmetcp_ip_services_if.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* + * Copyright 2021 Marvell. All rights reserved. 
+ */ + +#ifndef _QED_IP_SERVICES_IF_H +#define _QED_IP_SERVICES_IF_H + +#include +#include +#include +#include + +int qed_route_ipv4(struct sockaddr_storage *local_addr, + struct sockaddr_storage *remote_addr, + struct sockaddr *hardware_address, + struct net_device **ndev); +int qed_route_ipv6(struct sockaddr_storage *local_addr, + struct sockaddr_storage *remote_addr, + struct sockaddr *hardware_address, + struct net_device **ndev); +void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id); +struct pci_dev *qed_validate_ndev(struct net_device *ndev); +void qed_return_tcp_port(struct socket *sock); +int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr, + struct socket **sock, u16 *port); +__be16 qed_get_in_port(struct sockaddr_storage *sa); + +#endif /* _QED_IP_SERVICES_IF_H */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9455476c5b..d9680b798b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void) #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data.unlikely") __warned; \ - if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ + if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ @@ -363,6 +363,20 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ +/** + * unrcu_pointer - mark a pointer as not being RCU protected + * @p: pointer needing to lose its __rcu property + * + * Converts @p from an __rcu pointer to a __kernel pointer. + * This allows an __rcu pointer to be used with xchg() and friends. + */ +#define unrcu_pointer(p) \ +({ \ + typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \ + rcu_check_sparse(p, __rcu); \ + ((typeof(*p) __force __kernel *)(_________p1)); \ +}) + #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ @@ -518,7 +532,12 @@ do { \ * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * - * This is the RCU-bh counterpart to rcu_dereference_check(). + * This is the RCU-bh counterpart to rcu_dereference_check(). However, + * please note that starting in v5.0 kernels, vanilla RCU grace periods + * wait for local_bh_disable() regions of code in addition to regions of + * code demarked by rcu_read_lock() and rcu_read_unlock(). This means + * that synchronize_rcu(), call_rcu, and friends all take not only + * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) @@ -529,6 +548,11 @@ do { \ * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). + * However, please note that starting in v5.0 kernels, vanilla RCU grace + * periods wait for preempt_disable() regions of code in addition to + * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). + * This means that synchronize_rcu(), call_rcu, and friends all take not + * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ @@ -620,6 +644,12 @@ do { \ * sections, invocation of the corresponding RCU callback is deferred * until after the all the other CPUs exit their critical sections. 
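
A minimal sketch of the unrcu_pointer() macro introduced above, stripping the __rcu annotation from an xchg() result; the dummy_* names are hypothetical.

struct dummy_obj;	/* hypothetical type */
static struct dummy_obj __rcu *dummy_gp;

static struct dummy_obj *dummy_take(void)
{
	/* xchg() returns the old value; unrcu_pointer() drops __rcu so
	 * sparse does not warn about the address-space mismatch */
	return unrcu_pointer(xchg(&dummy_gp, NULL));
}
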
* + * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also + * wait for regions of code with preemption disabled, including regions of + * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which + * define synchronize_sched(), only code enclosed within rcu_read_lock() + * and rcu_read_unlock() are guaranteed to be waited for. + * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections. One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU @@ -672,33 +702,12 @@ static __always_inline void rcu_read_lock(void) /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * - * In most situations, rcu_read_unlock() is immune from deadlock. - * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() - * is responsible for deboosting, which it does via rt_mutex_unlock(). - * Unfortunately, this function acquires the scheduler's runqueue and - * priority-inheritance spinlocks. This means that deadlock could result - * if the caller of rcu_read_unlock() already holds one of these locks or - * any lock that is ever acquired while holding them. - * - * That said, RCU readers are never priority boosted unless they were - * preempted. Therefore, one way to avoid deadlock is to make sure - * that preemption never happens within any RCU read-side critical - * section whose outermost rcu_read_unlock() is called with one of - * rt_mutex_unlock()'s locks held. Such preemption can be avoided in - * a number of ways, for example, by invoking preempt_disable() before - * critical section's outermost rcu_read_lock(). - * - * Given that the set of locks acquired by rt_mutex_unlock() might change - * at any time, a somewhat more future-proofed approach is to make sure - * that that preemption never happens within any RCU read-side critical - * section whose outermost rcu_read_unlock() is called with irqs disabled. - * This approach relies on the fact that rt_mutex_unlock() currently only - * acquires irq-disabled locks. - * - * The second of these two approaches is best in most situations, - * however, the first approach can also be useful, at least to those - * developers willing to keep abreast of the set of locks acquired by - * rt_mutex_unlock(). + * In almost all situations, rcu_read_unlock() is immune from deadlock. + * In recent kernels that have consolidated synchronize_sched() and + * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity + * also extends to the scheduler's runqueue and priority-inheritance + * spinlocks, courtesy of the quiescent-state deferral that is carried + * out when rcu_read_unlock() is invoked with interrupts disabled. * * See rcu_read_lock() for more information. */ @@ -714,9 +723,11 @@ static inline void rcu_read_unlock(void) /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * - * This is equivalent of rcu_read_lock(), but also disables softirqs. - * Note that anything else that disables softirqs can also serve as - * an RCU read-side critical section. + * This is equivalent to rcu_read_lock(), but also disables softirqs. + * Note that anything else that disables softirqs can also serve as an RCU + * read-side critical section. However, please note that this equivalence + * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and + * rcu_read_lock_bh() were unrelated. 
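
A minimal sketch of an RCU-bh reader which, per the consolidation described above, is also respected by plain synchronize_rcu() on v5.0 and later; all names are hypothetical.

struct dummy_item { int val; };	/* hypothetical type */
static struct dummy_item __rcu *dummy_head;

static int dummy_reader(void)
{
	struct dummy_item *p;
	int ret = 0;

	rcu_read_lock_bh();	/* on v5.0+, also a vanilla RCU reader */
	p = rcu_dereference_bh(dummy_head);
	if (p)
		ret = p->val;
	rcu_read_unlock_bh();

	return ret;
}
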
* * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke @@ -749,9 +760,12 @@ static inline void rcu_read_unlock_bh(void) /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * - * This is equivalent of rcu_read_lock(), but disables preemption. - * Read-side critical sections can also be introduced by anything else - * that disables preemption, including local_irq_disable() and friends. + * This is equivalent to rcu_read_lock(), but also disables preemption. + * Read-side critical sections can also be introduced by anything else that + * disables preemption, including local_irq_disable() and friends. However, + * please note that the equivalence to rcu_read_lock() applies only to + * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() + * were unrelated. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 35e0be326f..953e70fafe 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -86,7 +86,6 @@ static inline void rcu_irq_enter(void) { } static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } -static inline void rcu_irq_exit_preempt(void) { } static inline void rcu_irq_exit_check_preempt(void) { } #define rcu_is_idle_cpu(cpu) \ (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq()) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index b89b54130f..53209d6694 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -49,7 +49,6 @@ void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); -void rcu_irq_exit_preempt(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); bool rcu_is_idle_cpu(int cpu); diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 3734cd8f38..af907a3d68 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -79,6 +79,7 @@ extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; extern void orderly_poweroff(bool force); extern void orderly_reboot(void); +void hw_protection_shutdown(const char *reason, int ms_until_forced); /* * Emergency restart, callable from an interrupt handler. diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f87a11a5cc..f5f08dd0a1 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -27,6 +27,7 @@ struct device_node; struct i2c_client; struct i3c_device; struct irq_domain; +struct mdio_device; struct slim_device; struct spi_device; struct spmi_device; @@ -502,6 +503,7 @@ typedef void (*regmap_hw_free_context)(void *context); * DEFAULT, BIG is assumed. * @max_raw_read: Max raw read size that can be used on the bus. * @max_raw_write: Max raw write size that can be used on the bus. 
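
A minimal sketch of the hw_protection_shutdown() helper declared above, as a thermal or regulator protection path might call it; the reason string and timeout are illustrative only.

static void dummy_overheat_handler(void)
{
	/* Attempt an orderly shutdown; fall back to a forced one if it
	 * has not completed within 100 ms */
	hw_protection_shutdown("dummy: critical over-temperature", 100);
}
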
+ * @free_on_exit: kfree this on exit of regmap */ struct regmap_bus { bool fast_io; @@ -519,6 +521,7 @@ struct regmap_bus { enum regmap_endian val_format_endian_default; size_t max_raw_read; size_t max_raw_write; + bool free_on_exit; }; /* @@ -538,6 +541,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__regmap_init_sccb(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -594,6 +601,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -697,6 +708,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \ i2c, config) +/** + * regmap_init_mdio() - Initialise register map + * + * @mdio_dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_mdio(mdio_dev, config) \ + __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \ + mdio_dev, config) + /** * regmap_init_sccb() - Initialise register map * @@ -888,6 +912,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \ i2c, config) +/** + * devm_regmap_init_mdio() - Initialise managed register map + * + * @mdio_dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_mdio(mdio_dev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \ + mdio_dev, config) + /** * devm_regmap_init_sccb() - Initialise managed register map * @@ -1411,6 +1449,7 @@ struct regmap_irq_sub_irq_map { * @not_fixed_stride: Used when chip peripherals are not laid out with fixed * stride. Must be used with sub_reg_offsets containing the * offsets to each peripheral. + * @status_invert: Inverted status register: cleared bits are active interrupts. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. 
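
A minimal sketch of the new devm_regmap_init_mdio() from a PHY driver probe; the config field values below are illustrative assumptions for a Clause-22 device, not a required layout.

static const struct regmap_config dummy_c22_cfg = {
	.reg_bits = 5,		/* Clause-22 register address width */
	.val_bits = 16,		/* MDIO registers are 16 bits wide */
	.max_register = 31,
};

static int dummy_mdio_probe(struct mdio_device *mdiodev)
{
	struct regmap *map;

	map = devm_regmap_init_mdio(mdiodev, &dummy_c22_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;	/* regmap is freed automatically on detach */
}
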
@@ -1463,6 +1502,7 @@ struct regmap_irq_chip {
 bool type_in_mask:1;
 bool clear_on_unmask:1;
 bool not_fixed_stride:1;
+ bool status_invert:1;
 int num_regs;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 20e84a84fb..f72ca73631 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -119,6 +119,16 @@ struct regulator_dev;
 #define REGULATOR_EVENT_PRE_DISABLE 0x400
 #define REGULATOR_EVENT_ABORT_DISABLE 0x800
 #define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working but consumers
+ * should take a recovery action to prevent problems escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
 /*
 * Regulator errors that can be queried using regulator_get_error_flags
@@ -138,6 +148,10 @@ struct regulator_dev;
 #define REGULATOR_ERROR_FAIL BIT(4)
 #define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
 /**
 * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 5f86824bd1..73291f280a 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -52,7 +52,6 @@ struct regulator_coupler {
 #ifdef CONFIG_REGULATOR
 int regulator_coupler_register(struct regulator_coupler *coupler);
-const char *rdev_get_name(struct regulator_dev *rdev);
 int regulator_check_consumers(struct regulator_dev *rdev,
 int *min_uV, int *max_uV,
 suspend_state_t state);
@@ -69,10 +68,6 @@ static inline int regulator_coupler_register(struct regulator_coupler *coupler)
 {
 return 0;
 }
-static inline const char *rdev_get_name(struct regulator_dev *rdev)
-{
- return NULL;
-}
 static inline int regulator_check_consumers(struct regulator_dev *rdev,
 int *min_uV, int *max_uV,
 suspend_state_t state)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4ea520c248..4aec203878 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,15 @@ enum regulator_status {
 REGULATOR_STATUS_UNDEFINED,
 };
+enum regulator_detection_severity {
+ /* Hardware shuts down voltage outputs if the condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
+};
+
 /* Initialize struct linear_range for regulators */
 #define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
 { \
@@ -78,8 +87,25 @@ enum regulator_status {
 * @get_current_limit: Get the configured limit for a current-limited regulator.
 * @set_input_current_limit: Configure an input limit.
 *
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ * REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ * REGULATOR_SEVERITY_ERR should indicate that the over-current situation is
+ * caused by an unrecoverable error but HW does not perform
+ * automatic shut down.
+ * REGULATOR_SEVERITY_WARN should indicate a situation where hardware is
+ * still believed to not be damaged but that a board specific
+ * recovery action is needed. If lim_uA is 0 the limit should not
+ * be changed but the detection should just be enabled/disabled as
+ * is requested.
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ * voltage situation detection. Detection can be configured for same
+ * severities as over current protection.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ * under voltage situation detection.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ * temperature situation detection.
 *
 * @set_active_discharge: Set active discharge enable/disable of regulators.
 *
@@ -143,8 +169,15 @@ struct regulator_ops {
 int (*get_current_limit) (struct regulator_dev *);
 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
 /* enable/disable regulator */
 int (*enable) (struct regulator_dev *);
@@ -413,6 +446,128 @@ struct regulator_config {
 struct gpio_desc *ena_gpiod;
 };
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator whose status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Number of associated regulators.
+ * @data: Driver data pointer given at regulator_irq_desc.
+ * @opaque: Value storage for IC driver. Core does not update this. ICs
+ * may want to store status register value here at map_event and
+ * compare contents at 'renable' callback to see if new problems
+ * have been added to status. If that is the case it may be
+ * desirable to return REGULATOR_ERROR_CLEARED and not
+ * REGULATOR_ERROR_ON to allow the IRQ to fire again and to generate
+ * notifications also for the new issues.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ * + * @name: The visible name for the IRQ + * @fatal_cnt: If this IRQ is used to signal HW damaging condition it may be + * best to shut-down regulator(s) or reboot the SOC if error + * handling is repeatedly failing. If fatal_cnt is given the IRQ + * handling is aborted if it fails for fatal_cnt times and die() + * callback (if populated) or BUG() is called to try to prevent + * further damage. + * @reread_ms: The time which is waited before attempting to re-read status + * at the worker if IC reading fails. Immediate re-read is done + * if time is not specified. + * @irq_off_ms: The time which IRQ is kept disabled before re-evaluating the + * status for devices which keep IRQ disabled for duration of the + * error. If this is not given the IRQ is left enabled and renable + * is not called. + * @skip_off: If set to true the IRQ handler will attempt to check if any of + * the associated regulators are enabled prior to taking other + * actions. If no regulators are enabled and this is set to true + * a spurious IRQ is assumed and IRQ_NONE is returned. + * @high_prio: Boolean to indicate that high priority WQ should be used. + * @data: Driver private data pointer which will be passed as such to + * the renable, map_event and die callbacks in regulator_irq_data. + * @die: Protection callback. If IC status reading or recovery actions + * fail fatal_cnt times this callback or BUG() is called. This + * callback should implement a final protection attempt like + * disabling the regulator. If protection succeeded this may + * return 0. If anything else is returned the core assumes final + * protection failed and calls BUG() as a last resort. + * @map_event: Driver callback to map IRQ status into regulator devices with + * events / errors. NOTE: callback MUST initialize both the + * errors and notifs for all rdevs which it signals having + * active events as core does not clean the map data. + * REGULATOR_FAILED_RETRY can be returned to indicate that the + * status reading from IC failed. If this is repeated for + * fatal_cnt times the core will call die() callback or BUG() + * as a last resort to protect the HW. + * @renable: Optional callback to check status (if HW supports that) before + * re-enabling IRQ. If implemented this should clear the error + * flags so that errors fetched by regulator_get_error_flags() + * are updated. If callback is not implemented then errors are + * assumed to be cleared and IRQ is re-enabled. + * REGULATOR_FAILED_RETRY can be returned to + * indicate that the status reading from IC failed. If this is + * repeated for 'fatal_cnt' times the core will call die() + * callback or BUG() as a last resort to protect the HW. + * Returning zero indicates that the problem in HW has been solved + * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON + * indicates the error condition is still active and keeps IRQ + * disabled. Please note that returning REGULATOR_ERROR_ON does + * not retrigger evaluating what events are active or resending + * notifications. If this is needed you probably want to return + * zero and allow IRQ to retrigger causing events to be + * re-evaluated and re-sent. + * + * This structure is used for registering regulator IRQ notification helper. 
+ */ +struct regulator_irq_desc { + const char *name; + int irq_flags; + int fatal_cnt; + int reread_ms; + int irq_off_ms; + bool skip_off; + bool high_prio; + void *data; + + int (*die)(struct regulator_irq_data *rid); + int (*map_event)(int irq, struct regulator_irq_data *rid, + unsigned long *dev_mask); + int (*renable)(struct regulator_irq_data *rid); +}; + +/* + * Return values for regulator IRQ helpers. + */ +enum { + REGULATOR_ERROR_CLEARED, + REGULATOR_FAILED_RETRY, + REGULATOR_ERROR_ON, +}; + /* * struct coupling_desc * @@ -477,6 +632,9 @@ struct regulator_dev { /* time when this regulator was disabled last time */ ktime_t last_off; + int cached_err; + bool use_cached_err; + spinlock_t err_lock; }; struct regulator_dev * @@ -491,6 +649,16 @@ void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); +void *devm_regulator_irq_helper(struct device *dev, + const struct regulator_irq_desc *d, int irq, + int irq_flags, int common_errs, + int *per_rdev_errs, struct regulator_dev **rdev, + int rdev_amount); +void *regulator_irq_helper(struct device *dev, + const struct regulator_irq_desc *d, int irq, + int irq_flags, int common_errs, int *per_rdev_errs, + struct regulator_dev **rdev, int rdev_amount); +void regulator_irq_helper_cancel(void **handle); void *rdev_get_drvdata(struct regulator_dev *rdev); struct device *rdev_get_dev(struct regulator_dev *rdev); @@ -540,6 +708,7 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev, int regulator_get_current_limit_regmap(struct regulator_dev *rdev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay); +int regulator_sync_voltage_rdev(struct regulator_dev *rdev); /* * Helper functions intended to be used by regulator drivers prior registering @@ -550,4 +719,14 @@ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, int regulator_desc_list_voltage_linear(const struct regulator_desc *desc, unsigned int selector); + +#ifdef CONFIG_REGULATOR +const char *rdev_get_name(struct regulator_dev *rdev); +#else +static inline const char *rdev_get_name(struct regulator_dev *rdev) +{ + return NULL; +} +#endif + #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 8a56f033b6..68b4a514a4 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -83,6 +83,14 @@ struct regulator_state { bool changeable; }; +#define REGULATOR_NOTIF_LIMIT_DISABLE -1 +#define REGULATOR_NOTIF_LIMIT_ENABLE -2 +struct notification_limit { + int prot; + int err; + int warn; +}; + /** * struct regulation_constraints - regulator operating constraints. * @@ -100,6 +108,11 @@ struct regulator_state { * @ilim_uA: Maximum input current. * @system_load: Load that isn't captured by any consumer requests. * + * @over_curr_limits: Limits for acting on over current. + * @over_voltage_limits: Limits for acting on over voltage. + * @under_voltage_limits: Limits for acting on under voltage. + * @temp_limits: Limits for acting on over temperature. + * @max_spread: Max possible spread between coupled regulators * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. @@ -116,6 +129,11 @@ struct regulator_state { * @pull_down: Enable pull down when regulator is disabled. 
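
A minimal sketch of wiring an IC's error IRQ to the notification helper above via devm_regulator_irq_helper(); all dummy_* names are hypothetical and error handling is trimmed.

static int dummy_map_event(int irq, struct regulator_irq_data *rid,
			   unsigned long *dev_mask)
{
	/* One associated regulator; report a warning-level event on it */
	rid->states[0].notifs = REGULATOR_EVENT_OVER_CURRENT_WARN;
	rid->states[0].errors = REGULATOR_ERROR_OVER_CURRENT_WARN;
	*dev_mask = 1;

	return 0;
}

static const struct regulator_irq_desc dummy_ocw_desc = {
	.name = "dummy-over-current",
	.map_event = dummy_map_event,
};

static int dummy_register_irq(struct device *dev, struct regulator_dev *rdev,
			      int irq)
{
	void *helper;

	helper = devm_regulator_irq_helper(dev, &dummy_ocw_desc, irq, 0,
					   REGULATOR_ERROR_OVER_CURRENT_WARN,
					   NULL, &rdev, 1);

	return PTR_ERR_OR_ZERO(helper);
}
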
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 8a56f033b6..68b4a514a4 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -83,6 +83,14 @@ struct regulator_state { bool changeable; }; +#define REGULATOR_NOTIF_LIMIT_DISABLE -1 +#define REGULATOR_NOTIF_LIMIT_ENABLE -2 +struct notification_limit { + int prot; + int err; + int warn; +}; + /** * struct regulation_constraints - regulator operating constraints. * @@ -100,6 +108,11 @@ struct regulator_state { * @ilim_uA: Maximum input current. * @system_load: Load that isn't captured by any consumer requests. * + * @over_curr_limits: Limits for acting on over current. + * @over_voltage_limits: Limits for acting on over voltage. + * @under_voltage_limits: Limits for acting on under voltage. + * @temp_limits: Limits for acting on over temperature. + * @max_spread: Max possible spread between coupled regulators * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. @@ -116,6 +129,11 @@ struct regulator_state { * @pull_down: Enable pull down when regulator is disabled. * @over_current_protection: Auto disable on over current event. * + * @over_current_detection: Configure over current limits. + * @over_voltage_detection: Configure over voltage limits. + * @under_voltage_detection: Configure under voltage limits. + * @over_temp_detection: Configure over temperature limits. + * * @input_uV: Input voltage for regulator when supplied by another regulator. * * @state_disk: State for regulator when system is suspended in disk mode. @@ -172,6 +190,10 @@ struct regulation_constraints { struct regulator_state state_disk; struct regulator_state state_mem; struct regulator_state state_standby; + struct notification_limit over_curr_limits; + struct notification_limit over_voltage_limits; + struct notification_limit under_voltage_limits; + struct notification_limit temp_limits; suspend_state_t initial_state; /* suspend state to set at init */ /* mode to set on startup */ @@ -193,6 +215,10 @@ struct regulation_constraints { unsigned soft_start:1; /* ramp voltage slowly */ unsigned pull_down:1; /* pull down resistor when regulator off */ unsigned over_current_protection:1; /* auto disable on over current */ + unsigned over_current_detection:1; /* notify on over current */ + unsigned over_voltage_detection:1; /* notify on over voltage */ + unsigned under_voltage_detection:1; /* notify on under voltage */ + unsigned over_temp_detection:1; /* notify on over temperature */ }; /** diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h new file mode 100644 index 0000000000..6d6e5a58f4 --- /dev/null +++ b/include/linux/regulator/mt6359-regulator.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#ifndef __LINUX_REGULATOR_MT6359_H +#define __LINUX_REGULATOR_MT6359_H + +enum { + MT6359_ID_VS1 = 0, + MT6359_ID_VGPU11, + MT6359_ID_VMODEM, + MT6359_ID_VPU, + MT6359_ID_VCORE, + MT6359_ID_VS2, + MT6359_ID_VPA, + MT6359_ID_VPROC2, + MT6359_ID_VPROC1, + MT6359_ID_VCORE_SSHUB, + MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB, + MT6359_ID_VAUD18 = 10, + MT6359_ID_VSIM1, + MT6359_ID_VIBR, + MT6359_ID_VRF12, + MT6359_ID_VUSB, + MT6359_ID_VSRAM_PROC2, + MT6359_ID_VIO18, + MT6359_ID_VCAMIO, + MT6359_ID_VCN18, + MT6359_ID_VFE28, + MT6359_ID_VCN13, + MT6359_ID_VCN33_1_BT, + MT6359_ID_VCN33_1_WIFI, + MT6359_ID_VAUX18, + MT6359_ID_VSRAM_OTHERS, + MT6359_ID_VEFUSE, + MT6359_ID_VXO22, + MT6359_ID_VRFCK, + MT6359_ID_VBIF28, + MT6359_ID_VIO28, + MT6359_ID_VEMC, + MT6359_ID_VCN33_2_BT, + MT6359_ID_VCN33_2_WIFI, + MT6359_ID_VA12, + MT6359_ID_VA09, + MT6359_ID_VRF18, + MT6359_ID_VSRAM_MD, + MT6359_ID_VUFS, + MT6359_ID_VM18, + MT6359_ID_VBBCK, + MT6359_ID_VSRAM_PROC1, + MT6359_ID_VSIM2, + MT6359_ID_VSRAM_OTHERS_SSHUB, + MT6359_ID_RG_MAX, +}; + +#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6359_H */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 8b795b544f..a5b37bc108 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -243,7 +243,7 @@ struct fw_rsc_trace { * @da: device address * @align: the alignment between the consumer and producer parts of the vring * @num: num of buffers supported by this vring (must be power of two) - * @notifyid is a unique rproc-wide notify index for this vring. This notify + * @notifyid: a unique rproc-wide notify index for this vring. This notify * index is used when kicking a remote processor, to let it know that this * vring is triggered.
* @pa: physical address @@ -266,18 +266,18 @@ struct fw_rsc_vdev_vring { /** * struct fw_rsc_vdev - virtio device header * @id: virtio device id (as in virtio_ids.h) - * @notifyid is a unique rproc-wide notify index for this vdev. This notify + * @notifyid: a unique rproc-wide notify index for this vdev. This notify * index is used when kicking a remote processor, to let it know that the * status/features of this vdev have changes. - * @dfeatures specifies the virtio device features supported by the firmware - * @gfeatures is a place holder used by the host to write back the + * @dfeatures: specifies the virtio device features supported by the firmware + * @gfeatures: a place holder used by the host to write back the * negotiated features that are supported by both sides. - * @config_len is the size of the virtio config space of this vdev. The config + * @config_len: the size of the virtio config space of this vdev. The config * space lies in the resource table immediate after this vdev header. - * @status is a place holder where the host will indicate its virtio progress. - * @num_of_vrings indicates how many vrings are described in this vdev header + * @status: a place holder where the host will indicate its virtio progress. + * @num_of_vrings: indicates how many vrings are described in this vdev header * @reserved: reserved (must be zero) - * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. + * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. * * This resource is a virtio device header: it provides information about * the vdev, and is then used by the host and its peer remote processors @@ -287,16 +287,17 @@ struct fw_rsc_vdev_vring { * to statically allocate a vdev upon registration of the rproc (dynamic vdev * allocation is not yet supported). * - * Note: unlike virtualization systems, the term 'host' here means - * the Linux side which is running remoteproc to control the remote - * processors. We use the name 'gfeatures' to comply with virtio's terms, - * though there isn't really any virtualized guest OS here: it's the host - * which is responsible for negotiating the final features. - * Yeah, it's a bit confusing. + * Note: + * 1. unlike virtualization systems, the term 'host' here means + * the Linux side which is running remoteproc to control the remote + * processors. We use the name 'gfeatures' to comply with virtio's terms, + * though there isn't really any virtualized guest OS here: it's the host + * which is responsible for negotiating the final features. + * Yeah, it's a bit confusing. * - * Note: immediately following this structure is the virtio config space for - * this vdev (which is specific to the vdev; for more info, read the virtio - * spec). the size of the config space is specified by @config_len. + * 2. immediately following this structure is the virtio config space for + * this vdev (which is specific to the vdev; for more info, read the virtio + * spec). The size of the config space is specified by @config_len. */ struct fw_rsc_vdev { u32 id; @@ -440,7 +441,7 @@ enum rproc_state { * enum rproc_crash_type - remote processor crash types * @RPROC_MMUFAULT: iommu fault * @RPROC_WATCHDOG: watchdog bite - * @RPROC_FATAL_ERROR fatal error + * @RPROC_FATAL_ERROR: fatal error * * Each element of the enum is used as an array index. So that, the value of * the elements should be always something sane. 
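Since the reworked notes above pin down the layout (the vring descriptors live in the trailing @vring[] array, with the virtio config space of @config_len bytes immediately behind them), a host-side parser can compute the config-space address directly. A minimal sketch, assuming the field layout documented in the kerneldoc above:

/* Illustrative only: locate the virtio config space behind a vdev header. */
static void *fw_rsc_vdev_config(struct fw_rsc_vdev *rsc)
{
	/* @vring holds @num_of_vrings entries; the config space follows. */
	return (void *)&rsc->vring[rsc->num_of_vrings];
}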
@@ -457,9 +458,9 @@ enum rproc_crash_type { * enum rproc_dump_mechanism - Coredump options for core * @RPROC_COREDUMP_DISABLED: Don't perform any dump * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with - recovery + * recovery * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall - recovery until all segments are read + * recovery until all segments are read */ enum rproc_dump_mechanism { RPROC_COREDUMP_DISABLED, @@ -475,6 +476,7 @@ enum rproc_dump_mechanism { * @priv: private data associated with the dump_segment * @dump: custom dump function to fill device memory segment associated * with coredump + * @offset: offset of the segment */ struct rproc_dump_segment { struct list_head node; @@ -524,7 +526,9 @@ struct rproc_dump_segment { * @auto_boot: flag to indicate if remote processor should be auto-started * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc - * @char_dev: character device of the rproc + * @elf_class: firmware ELF class + * @elf_machine: firmware ELF machine + * @cdev: character device of the rproc * @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release */ struct rproc { @@ -613,10 +617,10 @@ struct rproc_vring { * struct rproc_vdev - remoteproc state for a supported virtio device * @refcount: reference counter for the vdev and vring allocations * @subdev: handle for registering the vdev as a rproc subdevice + * @dev: device struct used for reference count semantics * @id: virtio device id (as in virtio_ids.h) * @node: list node * @rproc: the rproc handle - * @vdev: the virio device * @vring: the vrings for this vdev * @rsc_offset: offset of the vdev's resource entry * @index: vdev position versus other vdev declared in resource table diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index ec35814e0b..0fa4f60e11 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -79,6 +79,7 @@ struct reset_controller_dev { unsigned int nr_resets; }; +#if IS_ENABLED(CONFIG_RESET_CONTROLLER) int reset_controller_register(struct reset_controller_dev *rcdev); void reset_controller_unregister(struct reset_controller_dev *rcdev); @@ -88,5 +89,26 @@ int devm_reset_controller_register(struct device *dev, void reset_controller_add_lookup(struct reset_control_lookup *lookup, unsigned int num_entries); +#else +static inline int reset_controller_register(struct reset_controller_dev *rcdev) +{ + return 0; +} + +static inline void reset_controller_unregister(struct reset_controller_dev *rcdev) +{ +} + +static inline int devm_reset_controller_register(struct device *dev, + struct reset_controller_dev *rcdev) +{ + return 0; +} + +static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup, + unsigned int num_entries) +{ +} +#endif #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 8d04e7deed..c976cc6de2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -86,9 +86,6 @@ struct anon_vma_chain { }; enum ttu_flags { - TTU_MIGRATION = 0x1, /* migration mode */ - TTU_MUNLOCK = 0x2, /* munlock mode */ - TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */ @@ -98,7 +95,6 @@ enum ttu_flags { * do a final flush if necessary */ TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock: * caller holds it */ - TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */ }; 
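With TTU_MIGRATION, TTU_MUNLOCK and TTU_SPLIT_FREEZE gone, the remaining ttu_flags form a plain bitmask and migration gets its own entry point (declared just below). A hedged sketch of what call sites look like after this change; the surrounding reclaim/migration context is assumed:

/* Reclaim path: unmap regardless of mlock, avoiding racy checks. */
try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_SYNC);

/* Migration path: dedicated helper instead of the old TTU_MIGRATION flag. */
try_to_migrate(page, TTU_RMAP_LOCKED);	/* caller already holds the rmap lock */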
#ifdef CONFIG_MMU @@ -195,7 +191,12 @@ static inline void page_dup_rmap(struct page *page, bool compound) int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -bool try_to_unmap(struct page *, enum ttu_flags flags); +void try_to_migrate(struct page *page, enum ttu_flags flags); +void try_to_unmap(struct page *, enum ttu_flags flags); + +int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, + unsigned long end, struct page **pages, + void *arg); /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) @@ -240,7 +241,7 @@ int page_mkclean(struct page *); * called in munlock()/munmap() path to check for other vmas holding * the page mlocked. */ -void try_to_munlock(struct page *); +void page_mlock(struct page *page); void remove_migration_ptes(struct page *old, struct page *new, bool locked); @@ -290,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -#define try_to_unmap(page, refs) false +static inline void try_to_unmap(struct page *page, enum ttu_flags flags) +{ +} static inline int page_mkclean(struct page *page) { diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 6f70572b29..ecf8748481 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -474,7 +474,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * Iterates over sg entries mapping page-by-page. On each successful * iteration, @miter->page points to the mapped page and * @miter->length bytes of data can be accessed at @miter->addr. As - * long as an interation is enclosed between start and stop, the user + * long as an iteration is enclosed between start and stop, the user * is free to choose control structure and when to stop. * * @miter->consumed is set to @miter->length on each iteration. 
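The comment above describes the start/next/stop contract for the mapping iterator; a typical enclosed iteration looks roughly like the following (the copy-out destination and the SG_MITER_FROM_SG direction are illustrative):

/* Hedged sketch: drain an sg list into a linear buffer. */
static void my_copy_from_sg(struct scatterlist *sgl, unsigned int nents,
			    u8 *dst)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr and miter.length are valid until the next call */
		memcpy(dst, miter.addr, miter.length);
		dst += miter.length;	/* miter.consumed == miter.length here */
	}
	sg_miter_stop(&miter);
}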
It diff --git a/include/linux/sched.h b/include/linux/sched.h index 32813c3451..ec8d07d886 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -113,11 +113,13 @@ struct task_group; __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ TASK_PARKED) -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) +#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING) -#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0) -#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) +#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0) + +#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP @@ -132,14 +134,14 @@ struct task_group; do { \ WARN_ON_ONCE(is_special_task_state(state_value));\ current->task_state_change = _THIS_IP_; \ - current->state = (state_value); \ + WRITE_ONCE(current->__state, (state_value)); \ } while (0) #define set_current_state(state_value) \ do { \ WARN_ON_ONCE(is_special_task_state(state_value));\ current->task_state_change = _THIS_IP_; \ - smp_store_mb(current->state, (state_value)); \ + smp_store_mb(current->__state, (state_value)); \ } while (0) #define set_special_state(state_value) \ @@ -148,7 +150,7 @@ struct task_group; WARN_ON_ONCE(!is_special_task_state(state_value)); \ raw_spin_lock_irqsave(¤t->pi_lock, flags); \ current->task_state_change = _THIS_IP_; \ - current->state = (state_value); \ + WRITE_ONCE(current->__state, (state_value)); \ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) #else @@ -190,10 +192,10 @@ struct task_group; * Also see the comments of try_to_wake_up(). */ #define __set_current_state(state_value) \ - current->state = (state_value) + WRITE_ONCE(current->__state, (state_value)) #define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) + smp_store_mb(current->__state, (state_value)) /* * set_special_state() should be used for those states when the blocking task @@ -205,12 +207,14 @@ struct task_group; do { \ unsigned long flags; /* may shadow */ \ raw_spin_lock_irqsave(¤t->pi_lock, flags); \ - current->state = (state_value); \ + WRITE_ONCE(current->__state, (state_value)); \ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) #endif +#define get_current_state() READ_ONCE(current->__state) + /* Task command name length: */ #define TASK_COMM_LEN 16 @@ -662,8 +666,7 @@ struct task_struct { */ struct thread_info thread_info; #endif - /* -1 unrunnable, 0 runnable, >0 stopped: */ - volatile long state; + unsigned int __state; /* * This begins the randomizable portion of task_struct. 
Only @@ -708,10 +711,17 @@ struct task_struct { const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; + struct sched_dl_entity dl; + +#ifdef CONFIG_SCHED_CORE + struct rb_node core_node; + unsigned long core_cookie; + unsigned int core_occupation; +#endif + #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; #endif - struct sched_dl_entity dl; #ifdef CONFIG_UCLAMP_TASK /* @@ -1520,7 +1530,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) static inline unsigned int task_state_index(struct task_struct *tsk) { - unsigned int tsk_state = READ_ONCE(tsk->state); + unsigned int tsk_state = READ_ONCE(tsk->__state); unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); @@ -1828,10 +1838,10 @@ static __always_inline void scheduler_ipi(void) */ preempt_fold_need_resched(); } -extern unsigned long wait_task_inactive(struct task_struct *, long match_state); +extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); #else static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) +static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) { return 1; } @@ -2018,6 +2028,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +extern bool sched_task_on_rq(struct task_struct *p); + /* * In order to reduce various lock holder preemption latencies provide an * interface to see if a vCPU is currently running or not. @@ -2179,4 +2191,14 @@ int sched_trace_rq_nr_running(struct rq *rq); const struct cpumask *sched_trace_rd_span(struct root_domain *rd); +#ifdef CONFIG_SCHED_CORE +extern void sched_core_free(struct task_struct *tsk); +extern void sched_core_fork(struct task_struct *p); +extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, + unsigned long uaddr); +#else +static inline void sched_core_free(struct task_struct *tsk) { } +static inline void sched_core_fork(struct task_struct *p) { } +#endif + #endif
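All of the above exists so that nothing dereferences ->__state directly any more: the accessors wrap every load and store in READ_ONCE()/WRITE_ONCE() (or smp_store_mb() for set_current_state()). A sketch only, with the wake-up condition and the observed task standing in for real driver state:

/* Sleeper side: publish the new state before re-checking the condition. */
static int my_wait_event(bool (*condition)(void))
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition())
			break;
		if (signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			return -ERESTARTSYS;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Observer side: use the wrappers instead of peeking at p->__state. */
static void my_report(struct task_struct *p)
{
	if (task_is_running(p))
		pr_debug("%s is currently on a CPU\n", p->comm);
}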
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index dfd82eab29..4d9e3a6568 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ #define MMF_MULTIPROCESS 27 /* mm is shared between processes */ +/* + * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either + * replaced in the future by mm.pinned_vm when it becomes stable, or grow into + * a counter on its own. We're aggressive on this bit for now: even if the + * pinned pages were unpinned later on, we'll still keep this bit set for the + * lifecycle of this mm, just for simplicity. + */ +#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 6205578ab6..bdd31ab93b 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -26,7 +26,7 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); static inline unsigned long map_util_freq(unsigned long util, unsigned long freq, unsigned long cap) { - return (freq + (freq >> 2)) * util / cap; + return freq * util / cap; } static inline unsigned long map_util_perf(unsigned long util) diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index ae51f4529f..b5035afa23 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu); /* * Only dump TASK_* tasks. (0 for all tasks) */ -extern void show_state_filter(unsigned long state_filter); +extern void show_state_filter(unsigned int state_filter); static inline void show_state(void) { diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index 34b21e971d..57bde66d95 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -90,6 +90,16 @@ SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD) */ SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) +/* + * Domain members have different CPU capacities spanning all unique CPU + * capacity values. + * + * SHARED_PARENT: Set from the topmost domain down to the first domain where + * all available CPU capacities are visible + * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups. + */ +SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) + /* * Domain members share CPU capacity (i.e. SMT) * diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 7f4278fa21..b9126fe06c 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -382,7 +382,7 @@ static inline int fatal_signal_pending(struct task_struct *p) return task_sigpending(p) && __fatal_signal_pending(p); } -static inline int signal_pending_state(long state, struct task_struct *p) +static inline int signal_pending_state(unsigned int state, struct task_struct *p) { if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) return 0; @@ -538,6 +538,17 @@ static inline int kill_cad_pid(int sig, int priv) #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) +static inline int __on_sig_stack(unsigned long sp) +{ +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + /* * True if we are on the alternate signal stack.
*/ @@ -555,13 +566,7 @@ static inline int on_sig_stack(unsigned long sp) if (current->sas_ss_flags & SS_AUTODISARM) return 0; -#ifdef CONFIG_STACK_GROWSUP - return sp >= current->sas_ss_sp && - sp - current->sas_ss_sp < current->sas_ss_size; -#else - return sp > current->sas_ss_sp && - sp - current->sas_ss_sp <= current->sas_ss_size; -#endif + return __on_sig_stack(sp); } static inline int sas_ss_flags(unsigned long sp) diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index 568286411b..0108a38bb6 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -3,6 +3,7 @@ #define _LINUX_SCHED_STAT_H #include +#include /* * Various counters maintained by the scheduler and fork(), @@ -16,21 +17,14 @@ extern unsigned long total_forks; extern int nr_threads; DECLARE_PER_CPU(unsigned long, process_counts); extern int nr_processes(void); -extern unsigned long nr_running(void); +extern unsigned int nr_running(void); extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); +extern unsigned int nr_iowait(void); +extern unsigned int nr_iowait_cpu(int cpu); static inline int sched_info_on(void) { -#ifdef CONFIG_SCHEDSTATS - return 1; -#elif defined(CONFIG_TASK_DELAY_ACCT) - extern int delayacct_on; - return delayacct_on; -#else - return 0; -#endif + return IS_ENABLED(CONFIG_SCHED_INFO); } #ifdef CONFIG_SCHEDSTATS diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 3632c5d6ec..2462f7d076 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -12,16 +12,9 @@ */ struct user_struct { refcount_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? */ - atomic_t sigpending; /* How many pending signals does this user have? */ #ifdef CONFIG_EPOLL atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ #endif -#ifdef CONFIG_POSIX_MQUEUE - /* protected by mq_lock */ - unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ -#endif - unsigned long locked_shm; /* How many pages of mlocked shm ? */ unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 528718e4ed..835ee87ed7 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -14,7 +14,7 @@ * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit * clocks. * @read_sched_clock: Current clock source (or dummy source when suspended). - * @mult: Multipler for scaled math conversion. + * @mult: Multiplier for scaled math conversion. * @shift: Shift value for scaled math conversion. 
* * Care must be taken when updating this structure; it is read by diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 79d0a1237e..80e781c51d 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -101,6 +101,10 @@ struct scmi_clk_proto_ops { * to sustained performance level mapping * @est_power_get: gets the estimated power cost for a given performance domain * at a given frequency + * @fast_switch_possible: indicates if fast DVFS switching is possible or not + * for a given device + * @power_scale_mw_get: indicates if the power values provided are in milliwatts + * or in some other (abstract) scale */ struct scmi_perf_proto_ops { int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain, @@ -153,7 +157,7 @@ struct scmi_power_proto_ops { }; /** - * scmi_sensor_reading - represent a timestamped read + * struct scmi_sensor_reading - represent a timestamped read * * Used by @reading_get_timestamped method. * @@ -167,7 +171,7 @@ struct scmi_sensor_reading { }; /** - * scmi_range_attrs - specifies a sensor or axis values' range + * struct scmi_range_attrs - specifies a sensor or axis values' range * @min_range: The minimum value which can be represented by the sensor/axis. * @max_range: The maximum value which can be represented by the sensor/axis. */ @@ -177,7 +181,7 @@ struct scmi_range_attrs { }; /** - * scmi_sensor_axis_info - describes one sensor axes + * struct scmi_sensor_axis_info - describes one sensor axis * @id: The axes ID. * @type: Axes type. Chosen amongst one of @enum scmi_sensor_class. * @scale: Power-of-10 multiplier applied to the axis unit. @@ -205,8 +209,8 @@ struct scmi_sensor_axis_info { }; /** - * scmi_sensor_intervals_info - describes number and type of available update - * intervals + * struct scmi_sensor_intervals_info - describes number and type of available + * update intervals * @segmented: Flag for segmented intervals' representation. When True there * will be exactly 3 intervals in @desc, with each entry * representing a member of a segment in this order: diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index afbf8037d8..d2176a5682 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -51,6 +51,14 @@ struct scpi_sensor_info { * OPP is an index to the list return by @dvfs_get_info * @dvfs_get_info: returns the DVFS capabilities of the given power * domain.
It includes the OPP list and the latency information + * @device_domain_id: gets the scpi domain id for a given device + * @get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @sensor_get_capability: get the list of capabilities for the sensors + * @sensor_get_info: get the information of the specified sensor + * @sensor_get_value: gets the current value of the sensor + * @device_get_power_state: gets the power state of a power domain + * @device_set_power_state: sets the power state of a power domain */ struct scpi_ops { u32 (*get_version)(void); diff --git a/include/linux/sctp.h b/include/linux/sctp.h index bb19265896..a86e852507 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -98,6 +98,7 @@ enum sctp_cid { SCTP_CID_I_FWD_TSN = 0xC2, SCTP_CID_ASCONF_ACK = 0x80, SCTP_CID_RECONF = 0x82, + SCTP_CID_PAD = 0x84, }; /* enum */ @@ -410,6 +411,12 @@ struct sctp_heartbeat_chunk { }; +/* PAD chunk could be bundled with heartbeat chunk to probe pmtu */ +struct sctp_pad_chunk { + struct sctp_chunkhdr uh; +}; + + /* For the abort and shutdown ACK we must carry the init tag in the * common header. Just the common header is all that is needed with a * chunk descriptor. diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h new file mode 100644 index 0000000000..21c3771e6a --- /dev/null +++ b/include/linux/secretmem.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_SECRETMEM_H +#define _LINUX_SECRETMEM_H + +#ifdef CONFIG_SECRETMEM + +extern const struct address_space_operations secretmem_aops; + +static inline bool page_is_secretmem(struct page *page) +{ + struct address_space *mapping; + + /* + * Using page_mapping() is quite slow because of the actual call + * instruction and repeated compound_head(page) inside the + * page_mapping() function. + * We know that secretmem pages are not compound and LRU so we can + * save a couple of cycles here. 
+ */ + if (PageCompound(page) || !PageLRU(page)) + return false; + + mapping = (struct address_space *) + ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); + + if (mapping != page->mapping) + return false; + + return mapping->a_ops == &secretmem_aops; +} + +bool vma_is_secretmem(struct vm_area_struct *vma); +bool secretmem_active(void); + +#else + +static inline bool vma_is_secretmem(struct vm_area_struct *vma) +{ + return false; +} + +static inline bool page_is_secretmem(struct page *page) +{ + return false; +} + +static inline bool secretmem_active(void) +{ + return false; +} + +#endif /* CONFIG_SECRETMEM */ + +#endif /* _LINUX_SECRETMEM_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 06f7c50ce7..24eda04221 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1681,7 +1681,7 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid); int security_xfrm_state_delete(struct xfrm_state *x); void security_xfrm_state_free(struct xfrm_state *x); -int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); +int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid); int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic); @@ -1732,7 +1732,7 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x) return 0; } -static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid) { return 0; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 723b1fa117..dd99569595 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -126,8 +126,16 @@ void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num void seq_put_hex_ll(struct seq_file *m, const char *delimiter, unsigned long long v, unsigned int width); +void seq_escape_mem(struct seq_file *m, const char *src, size_t len, + unsigned int flags, const char *esc); + +static inline void seq_escape_str(struct seq_file *m, const char *src, + unsigned int flags, const char *esc) +{ + seq_escape_mem(m, src, strlen(src), flags, esc); +} + void seq_escape(struct seq_file *m, const char *s, const char *esc); -void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index f61e34fbaa..37ded6b8fe 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -182,9 +182,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock) #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) -#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock); -#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex); -#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex); +#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) +#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) +#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex) /* * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers diff --git 
a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 9e65505511..5db211f43b 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -146,7 +146,7 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) return container_of(up, struct uart_8250_port, port); } -int serial8250_register_8250_port(struct uart_8250_port *); +int serial8250_register_8250_port(const struct uart_8250_port *); void serial8250_unregister_port(int line); void serial8250_suspend_port(int line); void serial8250_resume_port(int line); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index d7ed00f159..52d7fb92a6 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -408,7 +408,8 @@ int uart_register_driver(struct uart_driver *uart); void uart_unregister_driver(struct uart_driver *uart); int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); -int uart_match_port(struct uart_port *port1, struct uart_port *port2); +bool uart_match_port(const struct uart_port *port1, + const struct uart_port *port2); /* * Power Management @@ -428,7 +429,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; - if ((tty && tty->stopped) || port->hw_stopped) + if ((tty && tty->flow.stopped) || port->hw_stopped) return 1; return 0; } diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index fe1aa4e546..f36be5166c 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -28,7 +28,19 @@ static inline bool kernel_page_present(struct page *page) { return true; } +#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ +/* + * Some architectures, e.g. ARM64, can disable direct map modifications at + * boot time. Let them override this query.
+ */ +#ifndef can_set_direct_map +static inline bool can_set_direct_map(void) +{ + return true; +} +#define can_set_direct_map can_set_direct_map #endif +#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifndef set_mce_nospec static inline int set_mce_nospec(unsigned long pfn, bool unmap) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index d82b6f3965..8e775ce517 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -65,7 +65,7 @@ extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts); #ifdef CONFIG_SHMEM extern const struct address_space_operations shmem_aops; static inline bool shmem_mapping(struct address_space *mapping) @@ -122,21 +122,18 @@ static inline bool shmem_file(struct file *file) extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); +#ifdef CONFIG_USERFAULTFD #ifdef CONFIG_SHMEM -extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, +extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + bool zeropage, struct page **pagep); -extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, - pmd_t *dst_pmd, - struct vm_area_struct *dst_vma, - unsigned long dst_addr); -#else -#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ - src_addr, pagep) ({ BUG(); 0; }) -#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \ - dst_addr) ({ BUG(); 0; }) -#endif +#else /* !CONFIG_SHMEM */ +#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \ + src_addr, zeropage, pagep) ({ BUG(); 0; }) +#endif /* CONFIG_SHMEM */ +#endif /* CONFIG_USERFAULTFD */ #endif diff --git a/include/linux/signal.h b/include/linux/signal.h index 5160fd45e5..3454c7ff07 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -462,8 +462,6 @@ int __save_altstack(stack_t __user *, unsigned long); unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ - if (t->sas_ss_flags & SS_AUTODISARM) \ - sas_ss_reset(t); \ } while (0); #ifdef CONFIG_PROC_FS diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index 68e06c75c5..34cb28b8f1 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -13,6 +13,8 @@ typedef struct kernel_siginfo { __SIGINFO; } kernel_siginfo_t; +struct ucounts; + /* * Real Time signals may be queued. */ @@ -21,7 +23,7 @@ struct sigqueue { struct list_head list; int flags; kernel_siginfo_t info; - struct user_struct *user; + struct ucounts *ucounts; }; /* flags values. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dbf820a50a..b2db9cd9a7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -37,6 +37,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include #endif @@ -667,6 +668,8 @@ typedef unsigned char *sk_buff_data_t; * @head_frag: skb was allocated from page fragments, * not allocated by kmalloc() or vmalloc(). 
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @pp_recycle: mark the packet for recycling instead of freeing (implies + * page_pool support on driver) * @active_extensions: active extensions (skb_ext_id types) * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed @@ -791,10 +794,12 @@ struct sk_buff { fclone:2, peeked:1, head_frag:1, - pfmemalloc:1; + pfmemalloc:1, + pp_recycle:1; /* page_pool recycle indicator */ #ifdef CONFIG_SKB_EXTENSIONS __u8 active_extensions; #endif + /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ @@ -3081,12 +3086,20 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) /** * __skb_frag_unref - release a reference on a paged fragment. * @frag: the paged fragment + * @recycle: recycle the page if allocated via page_pool * - * Releases a reference on the paged fragment @frag. + * Releases a reference on the paged fragment @frag + * or recycles the page via the page_pool API. */ -static inline void __skb_frag_unref(skb_frag_t *frag) +static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - put_page(skb_frag_page(frag)); + struct page *page = skb_frag_page(frag); + +#ifdef CONFIG_PAGE_POOL + if (recycle && page_pool_return_skb_page(page)) + return; +#endif + put_page(page); } /** @@ -3098,7 +3111,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f]); + __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); } /** @@ -4697,5 +4710,21 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb) #endif } +#ifdef CONFIG_PAGE_POOL +static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page, + struct page_pool *pp) +{ + skb->pp_recycle = 1; + page_pool_store_mem_info(page, pp); +} +#endif + +static inline bool skb_pp_recycle(struct sk_buff *skb, void *data) +{ + if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) + return false; + return page_pool_return_skb_page(virt_to_page(data)); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index aba0f0f429..96f3190997 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -126,8 +126,6 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); -int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags, - long timeo, int *err); int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags); @@ -348,7 +346,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err) struct sock *sk = psock->sk; sk->sk_err = err; - sk->sk_error_report(sk); + sk_error_report(sk); } struct sk_psock *sk_psock_init(struct sock *sk, int node); diff --git a/include/linux/slab.h b/include/linux/slab.h index 0c97d78876..083f3ce550 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -305,9 +305,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, /* * Whenever changing this, take care of that kmalloc_type() and * create_kmalloc_caches() still work as intended. + * + * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP + * is for accounted but unreclaimable and non-dma objects. 
All the other + * kmem caches can have both accounted and unaccounted objects. */ enum kmalloc_cache_type { KMALLOC_NORMAL = 0, +#ifndef CONFIG_ZONE_DMA + KMALLOC_DMA = KMALLOC_NORMAL, +#endif +#ifndef CONFIG_MEMCG_KMEM + KMALLOC_CGROUP = KMALLOC_NORMAL, +#else + KMALLOC_CGROUP, +#endif KMALLOC_RECLAIM, #ifdef CONFIG_ZONE_DMA KMALLOC_DMA, @@ -319,24 +331,36 @@ enum kmalloc_cache_type { extern struct kmem_cache * kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; +/* + * Define gfp bits that should not be set for KMALLOC_NORMAL. + */ +#define KMALLOC_NOT_NORMAL_BITS \ + (__GFP_RECLAIMABLE | \ + (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \ + (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0)) + static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) { -#ifdef CONFIG_ZONE_DMA /* * The most common case is KMALLOC_NORMAL, so test for it - * with a single branch for both flags. + * with a single branch for all the relevant flags. */ - if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0)) + if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0)) return KMALLOC_NORMAL; /* - * At least one of the flags has to be set. If both are, __GFP_DMA - * is more important. + * At least one of the flags has to be set. Their priorities in + * decreasing order are: + * 1) __GFP_DMA + * 2) __GFP_RECLAIMABLE + * 3) __GFP_ACCOUNT */ - return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM; -#else - return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL; -#endif + if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA)) + return KMALLOC_DMA; + if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE)) + return KMALLOC_RECLAIM; + else + return KMALLOC_CGROUP; } /* @@ -346,8 +370,14 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) * 1 = 65 .. 96 bytes * 2 = 129 .. 192 bytes * n = 2^(n-1)+1 .. 2^n + * + * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized; + * typical usage is via kmalloc_index() and therefore evaluated at compile-time. + * Callers where !size_is_constant should only be test modules, where runtime + * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab(). */ -static __always_inline unsigned int kmalloc_index(size_t size) +static __always_inline unsigned int __kmalloc_index(size_t size, + bool size_is_constant) { if (!size) return 0; @@ -382,12 +412,17 @@ static __always_inline unsigned int kmalloc_index(size_t size) if (size <= 8 * 1024 * 1024) return 23; if (size <= 16 * 1024 * 1024) return 24; if (size <= 32 * 1024 * 1024) return 25; - if (size <= 64 * 1024 * 1024) return 26; - BUG(); + + if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000) + && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant) + BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()"); + else + BUG(); /* Will never be reached. Needed because the compiler may complain */ return -1; } +#define kmalloc_index(s) __kmalloc_index(s, true) #endif /* !CONFIG_SLOB */ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
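Worked through, the priority ladder above means the same allocation size can land in different caches purely based on the GFP bits. A hedged sketch, assuming a kernel with both CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM enabled:

/* kmalloc_type() resolution for a few representative flag sets: */
void *a = kmalloc(64, GFP_KERNEL);			/* KMALLOC_NORMAL */
void *b = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT);	/* KMALLOC_CGROUP */
void *c = kmalloc(64, GFP_KERNEL | __GFP_RECLAIMABLE);	/* KMALLOC_RECLAIM */
void *d = kmalloc(64, GFP_DMA | __GFP_ACCOUNT);		/* KMALLOC_DMA wins */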
diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h new file mode 100644 index 0000000000..88bd8de0e8 --- /dev/null +++ b/include/linux/soc/ixp4xx/cpu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * IXP4XX cpu type detection + * + * Copyright (C) 2007 MontaVista Software, Inc. + */ + +#ifndef __SOC_IXP4XX_CPU_H__ +#define __SOC_IXP4XX_CPU_H__ + +#include +#ifdef CONFIG_ARM +#include +#endif + +/* Processor id value in CP15 Register 0 */ +#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */ +#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0 + +#define IXP43X_PROCESSOR_ID_VALUE 0x69054040 +#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0 + +#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */ +#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0 + +/* "fuse" bits of IXP_EXP_CFG2 */ +/* All IXP4xx CPUs */ +#define IXP4XX_FEATURE_RCOMP (1 << 0) +#define IXP4XX_FEATURE_USB_DEVICE (1 << 1) +#define IXP4XX_FEATURE_HASH (1 << 2) +#define IXP4XX_FEATURE_AES (1 << 3) +#define IXP4XX_FEATURE_DES (1 << 4) +#define IXP4XX_FEATURE_HDLC (1 << 5) +#define IXP4XX_FEATURE_AAL (1 << 6) +#define IXP4XX_FEATURE_HSS (1 << 7) +#define IXP4XX_FEATURE_UTOPIA (1 << 8) +#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9) +#define IXP4XX_FEATURE_NPEC_ETH (1 << 10) +#define IXP4XX_FEATURE_RESET_NPEA (1 << 11) +#define IXP4XX_FEATURE_RESET_NPEB (1 << 12) +#define IXP4XX_FEATURE_RESET_NPEC (1 << 13) +#define IXP4XX_FEATURE_PCI (1 << 14) +#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16) +#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22) +#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \ + IXP4XX_FEATURE_USB_DEVICE | \ + IXP4XX_FEATURE_HASH | \ + IXP4XX_FEATURE_AES | \ + IXP4XX_FEATURE_DES | \ + IXP4XX_FEATURE_HDLC | \ + IXP4XX_FEATURE_AAL | \ + IXP4XX_FEATURE_HSS | \ + IXP4XX_FEATURE_UTOPIA | \ + IXP4XX_FEATURE_NPEB_ETH0 | \ + IXP4XX_FEATURE_NPEC_ETH | \ + IXP4XX_FEATURE_RESET_NPEA | \ + IXP4XX_FEATURE_RESET_NPEB | \ + IXP4XX_FEATURE_RESET_NPEC | \ + IXP4XX_FEATURE_PCI | \ + IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \ + IXP4XX_FEATURE_XSCALE_MAX_FREQ) + + +/* IXP43x/46x CPUs */ +#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15) +#define IXP4XX_FEATURE_USB_HOST (1 << 18) +#define IXP4XX_FEATURE_NPEA_ETH (1 << 19) +#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \ + IXP4XX_FEATURE_ECC_TIMESYNC | \ + IXP4XX_FEATURE_USB_HOST | \ + IXP4XX_FEATURE_NPEA_ETH) + +/* IXP46x CPU (including IXP455) only */ +#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20) +#define IXP4XX_FEATURE_RSA (1 << 21) +#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \ + IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \ + IXP4XX_FEATURE_RSA) + +#ifdef CONFIG_ARCH_IXP4XX +#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \ IXP42X_PROCESSOR_ID_VALUE) +#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \ IXP42X_PROCESSOR_ID_VALUE) +#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \ IXP43X_PROCESSOR_ID_VALUE) +#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \ IXP46X_PROCESSOR_ID_VALUE) + +u32 ixp4xx_read_feature_bits(void); +void ixp4xx_write_feature_bits(u32 value); +#else +#define cpu_is_ixp42x_rev_a0() 0 +#define cpu_is_ixp42x() 0 +#define cpu_is_ixp43x() 0 +#define cpu_is_ixp46x() 0 +static inline u32 ixp4xx_read_feature_bits(void) +{ + return 0; +} +static inline void ixp4xx_write_feature_bits(u32 value) +{ +} +#endif + +#endif /* __SOC_IXP4XX_CPU_H__ */ diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index 63ad8cddad..652c0158ba 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -14,6 +14,7 @@ struct qcom_smem_state_ops { #ifdef CONFIG_QCOM_SMEM_STATE struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned
*bit); +struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value); @@ -29,6 +30,13 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, return ERR_PTR(-EINVAL); } +static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, + const char *con_id, + unsigned *bit) +{ + return ERR_PTR(-EINVAL); +} + static inline void qcom_smem_state_put(struct qcom_smem_state *state) { } diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index fc9250fb31..aa840ed043 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -611,12 +611,6 @@ #define EXYNOS5420_FSYS2_OPTION 0x4168 #define EXYNOS5420_PSGEN_OPTION 0x4188 -/* For EXYNOS_CENTRAL_SEQ_OPTION */ -#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16) -#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BUT(17) -#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24) -#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25) - #define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4) #define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5) #define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6) diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index ced07f8fde..a48ac3e773 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -612,6 +612,7 @@ struct sdw_bus_params { * @update_status: Update Slave status * @bus_config: Update the bus config for Slave * @port_prep: Prepare the port with parameters + * @clk_stop: handle imp-def sequences before and after prepare and de-prepare */ struct sdw_slave_ops { int (*read_prop)(struct sdw_slave *sdw); @@ -624,7 +625,6 @@ struct sdw_slave_ops { int (*port_prep)(struct sdw_slave *slave, struct sdw_prepare_ch *prepare_ch, enum sdw_port_prep_ops pre_ops); - int (*get_clk_stop_mode)(struct sdw_slave *slave); int (*clk_stop)(struct sdw_slave *slave, enum sdw_clk_stop_mode mode, enum sdw_clk_stop_type type); @@ -675,7 +675,6 @@ struct sdw_slave { struct list_head node; struct completion port_ready[SDW_MAX_PORTS]; unsigned int m_port_map[SDW_MAX_PORTS]; - enum sdw_clk_stop_mode curr_clk_stop_mode; u16 dev_num; u16 dev_num_sticky; bool probed; @@ -1040,7 +1039,10 @@ int sdw_write(struct sdw_slave *slave, u32 addr, u8 value); int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value); int sdw_read_no_pm(struct sdw_slave *slave, u32 addr); int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); -int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); +int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val); +int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val); +int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val); + int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id); void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id); diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 3a5446ac01..1ebea77640 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -58,7 +58,7 @@ struct sdw_intel_acpi_info { u32 link_mask; }; -struct sdw_intel_link_res; +struct sdw_intel_link_dev; /* Intel clock-stop/pm_runtime quirk definitions */ @@ -109,7 +109,7 @@ struct sdw_intel_slave_id { * Controller * 
@num_slaves: total number of devices exposed across all enabled links * @handle: ACPI parent handle - * @links: information for each link (controller-specific and kept + * @ldev: information for each link (controller-specific and kept * opaque here) * @ids: array of slave_id, representing Slaves exposed across all enabled * links @@ -123,7 +123,7 @@ struct sdw_intel_ctx { u32 link_mask; int num_slaves; acpi_handle handle; - struct sdw_intel_link_res *links; + struct sdw_intel_link_dev **ldev; struct sdw_intel_slave_id *ids; struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 31f00c7f4f..eaab121ee5 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -2,8 +2,10 @@ /* * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs */ -#ifndef __linux_pxa2xx_spi_h -#define __linux_pxa2xx_spi_h +#ifndef __LINUX_SPI_PXA2XX_SPI_H +#define __LINUX_SPI_PXA2XX_SPI_H + +#include #include @@ -12,7 +14,10 @@ struct dma_chan; -/* device.platform_data for SSP controller devices */ +/* + * The platform data for SSP controller devices + * (resides in device.platform_data). + */ struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; @@ -28,8 +33,11 @@ struct pxa2xx_spi_controller { struct ssp_device ssp; }; -/* spi_board_info.controller_data for SPI slave devices, - * copied to spi_device.platform_data ... mostly for dma tuning +/* + * The controller specific data for SPI slave devices + * (resides in spi_board_info.controller_data), + * copied to spi_device.platform_data ... mostly for + * DMA tuning. */ struct pxa2xx_spi_chip { u8 tx_threshold; @@ -49,4 +57,5 @@ struct pxa2xx_spi_chip { extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); #endif -#endif + +#endif /* __LINUX_SPI_PXA2XX_SPI_H */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 2b65c9edc3..85e2ff7b84 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -250,6 +250,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem) * the currently mapped area), and the caller of * spi_mem_dirmap_write() is responsible for calling it again in * this case. + * @poll_status: poll memory device status until (status & mask) == match or + * when the timeout has expired. It fills the data buffer with + * the last status value. 
* * This interface should be implemented by SPI controllers providing an * high-level interface to execute SPI memory operation, which is usually the @@ -274,6 +277,12 @@ struct spi_controller_mem_ops { u64 offs, size_t len, void *buf); ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, const void *buf); + int (*poll_status)(struct spi_mem *mem, + const struct spi_mem_op *op, + u16 mask, u16 match, + unsigned long initial_delay_us, + unsigned long polling_rate_us, + unsigned long timeout_ms); }; /** @@ -369,6 +378,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, void devm_spi_mem_dirmap_destroy(struct device *dev, struct spi_mem_dirmap_desc *desc); +int spi_mem_poll_status(struct spi_mem *mem, + const struct spi_mem_op *op, + u16 mask, u16 match, + unsigned long initial_delay_us, + unsigned long polling_delay_us, + u16 timeout_ms); + int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, struct module *owner); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 74239d65c7..97b8d12b5f 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -299,6 +299,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } +extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select); + /* use a define to avoid include chaining to get THIS_MODULE */ #define spi_register_driver(driver) \ __spi_register_driver(THIS_MODULE, driver) @@ -586,6 +588,7 @@ struct spi_controller { bool (*can_dma)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer); + struct device *dma_map_dev; /* * These hooks are for drivers that want to use the generic @@ -1108,11 +1111,6 @@ static inline void spi_message_free(struct spi_message *m) kfree(m); } -extern int spi_set_cs_timing(struct spi_device *spi, - struct spi_delay *setup, - struct spi_delay *hold, - struct spi_delay *inactive); - extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, diff --git a/include/linux/srcu.h b/include/linux/srcu.h index a0895bbf71..e6011a9975 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); +#ifdef CONFIG_SRCU +void srcu_init(void); +#else /* #ifdef CONFIG_SRCU */ +static inline void srcu_init(void) { } +#endif /* #else #ifdef CONFIG_SRCU */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC /** diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 9cfcc8a756..cb1f4351e8 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -82,9 +82,7 @@ struct srcu_struct { /* callback for the barrier */ /* operation. */ struct delayed_work work; -#ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ }; /* Values for state variable (bottom bits of ->srcu_gp_seq). */ diff --git a/include/linux/stm.h b/include/linux/stm.h index c6f577ab6f..3b22689512 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -57,7 +57,7 @@ struct stm_device; * * Normally, an STM device will have a range of masters available to software * and the rest being statically assigned to various hardware trace sources. 
- * The former is defined by the the range [@sw_start..@sw_end] of the device + * The former is defined by the range [@sw_start..@sw_end] of the device * description. That is, the lowest master that can be allocated to software * writers is @sw_start and data from this writer will appear is @sw_start * master in the STP stream. diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 0db36360ef..a6f03b36fc 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -115,7 +115,9 @@ struct stmmac_axi { #define EST_GCL 1024 struct stmmac_est { + struct mutex lock; int enable; + u32 btr_reserve[2]; u32 btr_offset[2]; u32 btr[2]; u32 ctr[2]; @@ -172,6 +174,18 @@ struct stmmac_fpe_cfg { enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */ }; +struct stmmac_safety_feature_cfg { + u32 tsoee; + u32 mrxpee; + u32 mestee; + u32 mrxee; + u32 mtxee; + u32 epsi; + u32 edpp; + u32 prtyen; + u32 tmouten; +}; + struct plat_stmmacenet_data { int bus_id; int phy_addr; @@ -184,6 +198,7 @@ struct plat_stmmacenet_data { struct stmmac_dma_cfg *dma_cfg; struct stmmac_est *est; struct stmmac_fpe_cfg *fpe_cfg; + struct stmmac_safety_feature_cfg *safety_feat_cfg; int clk_csr; int has_gmac; int enh_desc; @@ -210,6 +225,7 @@ struct plat_stmmacenet_data { void (*fix_mac_speed)(void *priv, unsigned int speed); int (*serdes_powerup)(struct net_device *ndev, void *priv); void (*serdes_powerdown)(struct net_device *ndev, void *priv); + void (*speed_mode_2500)(struct net_device *ndev, void *priv); void (*ptp_clk_freq_config)(void *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); @@ -223,8 +239,10 @@ struct plat_stmmacenet_data { struct clk *clk_ptp_ref; unsigned int clk_ptp_rate; unsigned int clk_ref_rate; + unsigned int mult_fact_100ns; s32 ptp_max_adj; struct reset_control *stmmac_rst; + struct reset_control *stmmac_ahb_rst; struct stmmac_axi *axi; int has_gmac4; bool has_sun8i; @@ -249,5 +267,6 @@ struct plat_stmmacenet_data { int msi_sfty_ue_vec; int msi_rx_base_vec; int msi_tx_base_vec; + bool use_phy_wol; }; #endif diff --git a/include/linux/string.h b/include/linux/string.h index 9521d8cab1..b48d2d28e0 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -2,7 +2,6 @@ #ifndef _LINUX_STRING_H_ #define _LINUX_STRING_H_ - #include /* for inline */ #include /* for size_t */ #include /* for NULL */ @@ -184,12 +183,6 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); -extern int kstrtobool(const char *s, bool *res); -static inline int strtobool(const char *s, bool *res) -{ - return kstrtobool(s, res); -} - int match_string(const char * const *array, size_t n, const char *string); int __sysfs_match_string(const char * const *array, size_t n, const char *s); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index fa06dcdc48..68189c4a2e 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -2,6 +2,7 @@ #ifndef _LINUX_STRING_HELPERS_H_ #define _LINUX_STRING_HELPERS_H_ +#include #include #include @@ -18,13 +19,15 @@ enum string_size_units { void string_get_size(u64 size, u64 blk_size, enum string_size_units units, char *buf, int len); -#define UNESCAPE_SPACE 0x01 -#define UNESCAPE_OCTAL 0x02 -#define UNESCAPE_HEX 0x04 -#define UNESCAPE_SPECIAL 0x08 +#define UNESCAPE_SPACE BIT(0) +#define UNESCAPE_OCTAL BIT(1) +#define UNESCAPE_HEX BIT(2) +#define 
UNESCAPE_SPECIAL BIT(3) #define UNESCAPE_ANY \ (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) +#define UNESCAPE_ALL_MASK GENMASK(3, 0) + int string_unescape(char *src, char *dst, size_t size, unsigned int flags); static inline int string_unescape_inplace(char *buf, unsigned int flags) @@ -42,22 +45,24 @@ static inline int string_unescape_any_inplace(char *buf) return string_unescape_any(buf, buf, 0); } -#define ESCAPE_SPACE 0x01 -#define ESCAPE_SPECIAL 0x02 -#define ESCAPE_NULL 0x04 -#define ESCAPE_OCTAL 0x08 +#define ESCAPE_SPACE BIT(0) +#define ESCAPE_SPECIAL BIT(1) +#define ESCAPE_NULL BIT(2) +#define ESCAPE_OCTAL BIT(3) #define ESCAPE_ANY \ (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL) -#define ESCAPE_NP 0x10 +#define ESCAPE_NP BIT(4) #define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP) -#define ESCAPE_HEX 0x20 +#define ESCAPE_HEX BIT(5) +#define ESCAPE_NA BIT(6) +#define ESCAPE_NAP BIT(7) +#define ESCAPE_APPEND BIT(8) + +#define ESCAPE_ALL_MASK GENMASK(8, 0) int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, unsigned int flags, const char *only); -int string_escape_mem_ascii(const char *src, size_t isz, char *dst, - size_t osz); - static inline int string_escape_mem_any_np(const char *src, size_t isz, char *dst, size_t osz, const char *only) { diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index d0965e2997..b134b2b337 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -14,6 +14,7 @@ #include #include #include +#include #include /* diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 02e7a5863d..8b5d5c9755 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -29,6 +29,7 @@ #include struct rpc_inode; +struct rpc_sysfs_client; /* * The high-level client handle @@ -71,6 +72,7 @@ struct rpc_clnt { #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif + struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */ /* cl_work is only needed after cl_xpi is no longer used, * and that are of similar size */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index df696efdd6..a237b8dbf6 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -121,6 +121,7 @@ struct rpc_task_setup { */ #define RPC_TASK_ASYNC 0x0001 /* is an async task */ #define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ +#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ @@ -139,6 +140,7 @@ struct rpc_task_setup { #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) +#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE) #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 61b622e334..c8c39f22d3 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -53,6 +53,7 @@ enum rpc_display_format_t { struct rpc_task; struct rpc_xprt; +struct xprt_class; struct seq_file; struct svc_serv; struct net; @@ -182,9 +183,11 @@ enum xprt_transports { XPRT_TRANSPORT_LOCAL = 257, }; +struct rpc_sysfs_xprt; struct rpc_xprt { struct kref kref; /* Reference count */ const struct 
rpc_xprt_ops *ops; /* transport methods */ + unsigned int id; /* transport id */ const struct rpc_timeout *timeout; /* timeout parms */ struct sockaddr_storage addr; /* server address */ @@ -288,6 +291,9 @@ struct rpc_xprt { atomic_t inject_disconnect; #endif struct rcu_head rcu; + const struct xprt_class *xprt_class; + struct rpc_sysfs_xprt *xprt_sysfs; + bool main; /*mark if this is the 1st transport */ }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) @@ -370,6 +376,7 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, void xprt_free(struct rpc_xprt *); void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); +void xprt_cleanup_ids(void); static inline int xprt_enable_swap(struct rpc_xprt *xprt) @@ -408,6 +415,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); void xprt_unlock_connect(struct rpc_xprt *, void *); +void xprt_release_write(struct rpc_xprt *, struct rpc_task *); /* * Reserved bit positions in xprt->state @@ -419,6 +427,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *); #define XPRT_BOUND (4) #define XPRT_BINDING (5) #define XPRT_CLOSING (6) +#define XPRT_OFFLINE (7) +#define XPRT_REMOVE (8) #define XPRT_CONGESTED (9) #define XPRT_CWND_WAIT (10) #define XPRT_WRITE_SPACE (11) diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index c6cce3fbf2..b19addc8b7 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -10,10 +10,12 @@ #define _NET_SUNRPC_XPRTMULTIPATH_H struct rpc_xprt_iter_ops; +struct rpc_sysfs_xprt_switch; struct rpc_xprt_switch { spinlock_t xps_lock; struct kref xps_kref; + unsigned int xps_id; unsigned int xps_nxprts; unsigned int xps_nactive; atomic_long_t xps_queuelen; @@ -23,6 +25,7 @@ struct rpc_xprt_switch { const struct rpc_xprt_iter_ops *xps_iter_ops; + struct rpc_sysfs_xprt_switch *xps_sysfs; struct rcu_head xps_rcu; }; @@ -71,4 +74,7 @@ extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, const struct sockaddr *sap); + +extern void xprt_multipath_cleanup_ids(void); + #endif diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 3c1423ee74..8c2a712cb2 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -10,6 +10,7 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); +unsigned short get_srcport(struct rpc_xprt *); #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index 0806796eab..068e1982ad 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -6,7 +6,7 @@ * managing access and communication to and from the SSAM EC, as well as main * communication structures and definitions. * - * Copyright (C) 2019-2020 Maximilian Luz + * Copyright (C) 2019-2021 Maximilian Luz */ #ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H @@ -795,6 +795,20 @@ enum ssam_event_mask { #define SSAM_EVENT_REGISTRY_REG \ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02) +/** + * enum ssam_event_notifier_flags - Flags for event notifiers. + * @SSAM_EVENT_NOTIFIER_OBSERVER: + * The corresponding notifier acts as observer. 
Registering a notifier + * with this flag set will not attempt to enable any event. Equally, + * unregistering will not attempt to disable any event. Note that a + * notifier with this flag may not even correspond to a certain event at + * all, only to a specific event target category. Event matching will not + * be influenced by this flag. + */ +enum ssam_event_notifier_flags { + SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0), +}; + /** * struct ssam_event_notifier - Notifier block for SSAM events. * @base: The base notifier block with callback function and priority. @@ -803,6 +817,7 @@ enum ssam_event_mask { * @event.id: ID specifying the event. * @event.mask: Flags determining how events are matched to the notifier. * @event.flags: Flags used for enabling the event. + * @flags: Notifier flags (see &enum ssam_event_notifier_flags). */ struct ssam_event_notifier { struct ssam_notifier_block base; @@ -813,6 +828,8 @@ struct ssam_event_notifier { enum ssam_event_mask mask; u8 flags; } event; + + unsigned long flags; }; int ssam_notifier_register(struct ssam_controller *ctrl, @@ -821,4 +838,12 @@ int ssam_notifier_register(struct ssam_controller *ctrl, int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n); +int ssam_controller_event_enable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags); + +int ssam_controller_event_disable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags); + #endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 144727041e..6f5a432515 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -62,12 +62,17 @@ static inline int current_is_kswapd(void) * migrate part of a process memory to device memory. * * When a page is migrated from CPU to device, we set the CPU page table entry - * to a special SWP_DEVICE_* entry. + * to a special SWP_DEVICE_{READ|WRITE} entry. + * + * When a page is mapped by the device for exclusive access we set the CPU page + * table entries to special SWP_DEVICE_EXCLUSIVE_* entries. */ #ifdef CONFIG_DEVICE_PRIVATE -#define SWP_DEVICE_NUM 2 +#define SWP_DEVICE_NUM 4 #define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) #define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) +#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) +#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3) #else #define SWP_DEVICE_NUM 0 #endif @@ -177,7 +182,6 @@ enum { SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ - SWP_VALID = (1 << 13), /* swap is valid to be operated on? */ /* add others here before... */ SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ }; @@ -240,6 +244,7 @@ struct swap_cluster_list { * The in-memory structure used to track swap areas. */ struct swap_info_struct { + struct percpu_ref users; /* indicate and keep swap device valid. 
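 * A reader pins the device around any use of its state;
 * a sketch using the get_swap_device()/put_swap_device()
 * helpers declared in this header:
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		... use the swap cache state ...
 *		put_swap_device(si);
 *	}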
*/ unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ struct plist_node list; /* entry in swap_active_head */ @@ -260,6 +265,7 @@ struct swap_info_struct { struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ + struct completion comp; /* seldom referenced */ #ifdef CONFIG_FRONTSWAP unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ atomic_t frontswap_pages; /* frontswap pages in-use counter */ @@ -445,6 +451,7 @@ extern void __delete_from_swap_cache(struct page *page, extern void delete_from_swap_cache(struct page *); extern void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end); +extern void free_swap_cache(struct page *); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t entry, @@ -511,7 +518,7 @@ sector_t swap_page_sector(struct page *page); static inline void put_swap_device(struct swap_info_struct *si) { - rcu_read_unlock(); + percpu_ref_put(&si->users); } #else /* CONFIG_SWAP */ @@ -526,7 +533,20 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) return NULL; } -#define swap_address_space(entry) (NULL) +static inline struct swap_info_struct *get_swap_device(swp_entry_t entry) +{ + return NULL; +} + +static inline void put_swap_device(struct swap_info_struct *si) +{ +} + +static inline struct address_space *swap_address_space(swp_entry_t entry) +{ + return NULL; +} + #define get_nr_swap_pages() 0L #define total_swap_pages 0L #define total_swapcache_pages() 0UL @@ -541,12 +561,16 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); +static inline void free_swap_cache(struct page *page) +{ +} + static inline void show_swap_cache_info(void) { } -#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));}) -#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));}) +/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ +#define free_swap_and_cache(e) is_pfn_swap_entry(e) static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 6430a94c69..d356ab4047 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -107,10 +107,14 @@ static inline void *swp_to_radix_entry(swp_entry_t entry) } #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) -static inline swp_entry_t make_device_private_entry(struct page *page, bool write) +static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) { - return swp_entry(write ? 
SWP_DEVICE_WRITE : SWP_DEVICE_READ, - page_to_pfn(page)); + return swp_entry(SWP_DEVICE_READ, offset); +} + +static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) +{ + return swp_entry(SWP_DEVICE_WRITE, offset); } static inline bool is_device_private_entry(swp_entry_t entry) @@ -119,33 +123,40 @@ static inline bool is_device_private_entry(swp_entry_t entry) return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE; } -static inline void make_device_private_entry_read(swp_entry_t *entry) -{ - *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry)); -} - -static inline bool is_write_device_private_entry(swp_entry_t entry) +static inline bool is_writable_device_private_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); } -static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) +static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) { - return swp_offset(entry); + return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset); } -static inline struct page *device_private_entry_to_page(swp_entry_t entry) +static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) { - return pfn_to_page(swp_offset(entry)); + return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset); +} + +static inline bool is_device_exclusive_entry(swp_entry_t entry) +{ + return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ || + swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE; +} + +static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) +{ + return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE); } #else /* CONFIG_DEVICE_PRIVATE */ -static inline swp_entry_t make_device_private_entry(struct page *page, bool write) +static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) { return swp_entry(0, 0); } -static inline void make_device_private_entry_read(swp_entry_t *entry) +static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) { + return swp_entry(0, 0); } static inline bool is_device_private_entry(swp_entry_t entry) @@ -153,61 +164,52 @@ static inline bool is_device_private_entry(swp_entry_t entry) return false; } -static inline bool is_write_device_private_entry(swp_entry_t entry) +static inline bool is_writable_device_private_entry(swp_entry_t entry) { return false; } -static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) +static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) { - return 0; + return swp_entry(0, 0); } -static inline struct page *device_private_entry_to_page(swp_entry_t entry) +static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) { - return NULL; + return swp_entry(0, 0); +} + +static inline bool is_device_exclusive_entry(swp_entry_t entry) +{ + return false; +} + +static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) +{ + return false; } #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION -static inline swp_entry_t make_migration_entry(struct page *page, int write) -{ - BUG_ON(!PageLocked(compound_head(page))); - - return swp_entry(write ? 
SWP_MIGRATION_WRITE : SWP_MIGRATION_READ, - page_to_pfn(page)); -} - static inline int is_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_READ || swp_type(entry) == SWP_MIGRATION_WRITE); } -static inline int is_write_migration_entry(swp_entry_t entry) +static inline int is_writable_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); } -static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) +static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { - return swp_offset(entry); + return swp_entry(SWP_MIGRATION_READ, offset); } -static inline struct page *migration_entry_to_page(swp_entry_t entry) +static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) { - struct page *p = pfn_to_page(swp_offset(entry)); - /* - * Any use of migration entries may only occur while the - * corresponding page is locked - */ - BUG_ON(!PageLocked(compound_head(p))); - return p; -} - -static inline void make_migration_entry_read(swp_entry_t *entry) -{ - *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); + return swp_entry(SWP_MIGRATION_WRITE, offset); } extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, @@ -217,37 +219,58 @@ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, extern void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte); #else +static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) +{ + return swp_entry(0, 0); +} + +static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) +{ + return swp_entry(0, 0); +} -#define make_migration_entry(page, write) swp_entry(0, 0) static inline int is_migration_entry(swp_entry_t swp) { return 0; } -static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) -{ - return 0; -} - -static inline struct page *migration_entry_to_page(swp_entry_t entry) -{ - return NULL; -} - -static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } static inline void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte) { } -static inline int is_write_migration_entry(swp_entry_t entry) +static inline int is_writable_migration_entry(swp_entry_t entry) { return 0; } #endif +static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry) +{ + struct page *p = pfn_to_page(swp_offset(entry)); + + /* + * Any use of migration entries may only occur while the + * corresponding page is locked + */ + BUG_ON(is_migration_entry(entry) && !PageLocked(p)); + + return p; +} + +/* + * A pfn swap entry is a special type of swap entry that always has a pfn stored + * in the swap offset. They are used to represent unaddressable device memory + * and to restrict access to a page undergoing migration. 
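+ *
+ * For example (a sketch; migration entries additionally require the page to
+ * be locked), an entry built from a page's pfn round-trips through these
+ * helpers:
+ *
+ *	entry = make_readable_migration_entry(page_to_pfn(page));
+ *
+ * is_pfn_swap_entry(entry) is then true, and pfn_swap_entry_to_page(entry)
+ * returns the original page.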
+ */ +static inline bool is_pfn_swap_entry(swp_entry_t entry) +{ + return is_migration_entry(entry) || is_device_private_entry(entry) || + is_device_exclusive_entry(entry); +} + struct page_vma_mapped_walk; #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -265,6 +288,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) if (pmd_swp_soft_dirty(pmd)) pmd = pmd_swp_clear_soft_dirty(pmd); + if (pmd_swp_uffd_wp(pmd)) + pmd = pmd_swp_clear_uffd_wp(pmd); arch_entry = __pmd_to_swp_entry(pmd); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } @@ -330,6 +355,11 @@ static inline int is_hwpoison_entry(swp_entry_t entry) return swp_type(entry) == SWP_HWPOISON; } +static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry) +{ + return swp_offset(entry); +} + static inline void num_poisoned_pages_inc(void) { atomic_long_inc(&num_poisoned_pages); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 050511e8f1..69c9a70100 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -485,8 +485,8 @@ asmlinkage long sys_pipe2(int __user *fildes, int flags); /* fs/quota.c */ asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr); -asmlinkage long sys_quotactl_path(unsigned int cmd, const char __user *mountpoint, - qid_t id, void __user *addr); +asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id, + void __user *addr); /* fs/readdir.c */ asmlinkage long sys_getdents64(unsigned int fd, @@ -1050,6 +1050,7 @@ asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr _ asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, const void __user *rule_attr, __u32 flags); asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); +asmlinkage long sys_memfd_secret(unsigned int flags); /* * Architecture-specific system calls diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index d76a1ddf83..a12556a4b9 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -162,6 +162,12 @@ static const struct attribute_group _name##_group = { \ }; \ __ATTRIBUTE_GROUPS(_name) +#define BIN_ATTRIBUTE_GROUPS(_name) \ +static const struct attribute_group _name##_group = { \ + .bin_attrs = _name##_attrs, \ +}; \ +__ATTRIBUTE_GROUPS(_name) + struct file; struct vm_area_struct; struct address_space; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 157762db9d..0999f63179 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -9,6 +9,7 @@ #define _LINUX_THREAD_INFO_H #include +#include #include #include #include diff --git a/include/linux/tick.h b/include/linux/tick.h index 1a0ff88fa1..bfd571f18c 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -186,13 +186,17 @@ static inline bool tick_nohz_full_enabled(void) return tick_nohz_full_running; } -static inline bool tick_nohz_full_cpu(int cpu) -{ - if (!tick_nohz_full_enabled()) - return false; - - return cpumask_test_cpu(cpu, tick_nohz_full_mask); -} +/* + * Check if a CPU is part of the nohz_full subset. Arrange for evaluating + * the cpu expression (typically smp_processor_id()) _after_ the static + * key. 
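+ *
+ * A typical call site (a sketch) therefore pays no smp_processor_id() cost
+ * unless nohz_full is actually in use:
+ *
+ *	if (tick_nohz_full_cpu(smp_processor_id()))
+ *		tick_nohz_full_kick_cpu(smp_processor_id());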
+ */ +#define tick_nohz_full_cpu(_cpu) ({ \ + bool __ret = false; \ + if (tick_nohz_full_enabled()) \ + __ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \ + __ret; \ +}) static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { @@ -208,7 +212,7 @@ extern void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit); -extern void tick_nohz_dep_set_signal(struct signal_struct *signal, +extern void tick_nohz_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit); @@ -253,11 +257,11 @@ static inline void tick_dep_clear_task(struct task_struct *tsk, if (tick_nohz_full_enabled()) tick_nohz_dep_clear_task(tsk, bit); } -static inline void tick_dep_set_signal(struct signal_struct *signal, +static inline void tick_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) - tick_nohz_dep_set_signal(signal, bit); + tick_nohz_dep_set_signal(tsk, bit); } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) @@ -285,7 +289,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { } -static inline void tick_dep_set_signal(struct signal_struct *signal, +static inline void tick_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) { } diff --git a/include/linux/timer.h b/include/linux/timer.h index 4118a97e62..fda13c9d12 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer); #define del_singleshot_timer_sync(t) del_timer_sync(t) -extern bool timer_curr_running(struct timer_list *timer); - extern void init_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); diff --git a/include/linux/trace.h b/include/linux/trace.h index be1e130ed8..bf169612ff 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -41,6 +41,13 @@ int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); struct trace_array *trace_array_get_by_name(const char *name); int trace_array_destroy(struct trace_array *tr); + +/* For osnoise tracer */ +int osnoise_arch_register(void); +void osnoise_arch_unregister(void); +void osnoise_trace_irq_entry(int id); +void osnoise_trace_irq_exit(int id, const char *desc); + #endif /* CONFIG_TRACING */ #endif /* _LINUX_TRACE_H */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 13f65420f1..ab58696d0d 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -41,7 +41,17 @@ extern int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, int prio); extern int +tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data, + int prio); +extern int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); +static inline int +tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe, + void *data) +{ + return tracepoint_probe_register_prio_may_exist(tp, probe, data, + TRACEPOINT_DEFAULT_PRIO); +} extern void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), void 
*priv);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e5d6b1f288..19dc1097e0 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -243,20 +243,29 @@ struct tty_port {
 #define TTY_PORT_KOPENED	5	/* device exclusively opened by kernel */
 
-/*
- * Where all of the state associated with a tty is kept while the tty
- * is open. Since the termios state should be kept even if the tty
- * has been closed --- for things like the baud rate, etc --- it is
- * not stored here, but rather a pointer to the real state is stored
- * here. Possible the winsize structure should have the same
- * treatment, but (1) the default 80x24 is usually right and (2) it's
- * most often used by a windowing system, which will set the correct
- * size each time the window is created or resized anyway.
- *						- TYT, 9/14/92
- */
-
 struct tty_operations;
 
+/**
+ * struct tty_struct - state associated with a tty while open
+ *
+ * @flow.lock: lock for flow members
+ * @flow.stopped: tty stopped/started by tty_stop/tty_start
+ * @flow.tco_stopped: tty stopped/started by TCOOFF/TCOON ioctls (it has
+ *		      precedence over @flow.stopped)
+ * @flow.unused: alignment for Alpha, so that no members other than @flow.* are
+ *		 modified by the same 64b word store. The @flow's __aligned is
+ *		 there for the very same reason.
+ * @ctrl.lock: lock for ctrl members
+ * @ctrl.pgrp: process group of this tty (setpgrp(2))
+ * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
+ *		  @ctrl.lock and legacy mutex, readers must use at least one of
+ *		  them.
+ * @ctrl.pktstatus: packet mode status (bitwise OR of TIOCPKT_* constants)
+ * @ctrl.packet: packet mode enabled
+ *
+ * All of the state associated with a tty while the tty is open. Persistent
+ * storage for tty devices is referenced here as @port in struct tty_port.
+ */
 struct tty_struct {
 	int	magic;
 	struct kref kref;
@@ -274,27 +283,30 @@ struct tty_struct {
 	struct mutex throttle_mutex;
 	struct rw_semaphore termios_rwsem;
 	struct mutex winsize_mutex;
-	spinlock_t ctrl_lock;
-	spinlock_t flow_lock;
 	/* Termios values are protected by the termios rwsem */
 	struct ktermios termios, termios_locked;
 	char name[64];
-	struct pid *pgrp;		/* Protected by ctrl lock */
-	/*
-	 * Writes protected by both ctrl lock and legacy mutex, readers must use
-	 * at least one of them.
- */ - struct pid *session; unsigned long flags; int count; struct winsize winsize; /* winsize_mutex */ - unsigned long stopped:1, /* flow_lock */ - flow_stopped:1, - unused:BITS_PER_LONG - 2; + + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + unsigned long unused[0]; + } __aligned(sizeof(unsigned long)) flow; + + struct { + spinlock_t lock; + struct pid *pgrp; + struct pid *session; + unsigned char pktstatus; + bool packet; + unsigned long unused[0]; + } __aligned(sizeof(unsigned long)) ctrl; + int hw_stopped; - unsigned long ctrl_status:8, /* ctrl_lock */ - packet:1, - unused_ctrl:BITS_PER_LONG - 9; unsigned int receive_room; /* Bytes free for queue */ int flow_change; @@ -446,10 +458,9 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index); extern void tty_write_message(struct tty_struct *tty, char *msg); extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); -extern int tty_chars_in_buffer(struct tty_struct *tty); -extern int tty_write_room(struct tty_struct *tty); +extern unsigned int tty_chars_in_buffer(struct tty_struct *tty); +extern unsigned int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); -extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); extern int tty_throttle_safe(struct tty_struct *tty); extern int tty_unthrottle_safe(struct tty_struct *tty); @@ -484,6 +495,9 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) return tty_termios_baud_rate(&tty->termios); } +unsigned char tty_get_char_size(unsigned int cflag); +unsigned char tty_get_frame_size(unsigned int cflag); + extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); @@ -624,11 +638,11 @@ static inline int tty_port_users(struct tty_port *port) return port->count + port->blocked_open; } -extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); -extern int tty_unregister_ldisc(int disc); +extern int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); +extern void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); extern int tty_set_ldisc(struct tty_struct *tty, int disc); extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, - char *f, int count); + const char *f, int count); /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 2f719b471d..448f8ee6db 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -89,7 +89,7 @@ * * Note: Do not call this function directly, call tty_driver_flush_chars * - * int (*write_room)(struct tty_struct *tty); + * unsigned int (*write_room)(struct tty_struct *tty); * * This routine returns the numbers of characters the tty driver * will accept for queuing to be written. This number is subject @@ -136,7 +136,7 @@ * the line discipline are close to full, and it should somehow * signal that no more characters should be sent to the tty. * - * Optional: Always invoke via tty_throttle(), called under the + * Optional: Always invoke via tty_throttle_safe(), called under the * termios lock. 
* * void (*unthrottle)(struct tty_struct * tty); @@ -153,7 +153,7 @@ * This routine notifies the tty driver that it should stop * outputting characters to the tty device. * - * Called with ->flow_lock held. Serialized with start() method. + * Called with ->flow.lock held. Serialized with start() method. * * Optional: * @@ -164,7 +164,7 @@ * This routine notifies the tty driver that it resume sending * characters to the tty device. * - * Called with ->flow_lock held. Serialized with stop() method. + * Called with ->flow.lock held. Serialized with stop() method. * * Optional: * @@ -256,8 +256,8 @@ struct tty_operations { const unsigned char *buf, int count); int (*put_char)(struct tty_struct *tty, unsigned char ch); void (*flush_chars)(struct tty_struct *tty); - int (*write_room)(struct tty_struct *tty); - int (*chars_in_buffer)(struct tty_struct *tty); + unsigned int (*write_room)(struct tty_struct *tty); + unsigned int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long (*compat_ioctl)(struct tty_struct *tty, diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 767f62086b..67d78dc553 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -2,8 +2,10 @@ #ifndef _LINUX_TTY_FLIP_H #define _LINUX_TTY_FLIP_H +#include + extern int tty_buffer_set_limit(struct tty_port *port, int limit); -extern int tty_buffer_space_avail(struct tty_port *port); +extern unsigned int tty_buffer_space_avail(struct tty_port *port); extern int tty_buffer_request_room(struct tty_port *port, size_t size); extern int tty_insert_flip_string_flags(struct tty_port *port, const unsigned char *chars, const char *flags, size_t size); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 31284b55bd..fbe9de2786 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -201,15 +201,13 @@ struct tty_ldisc_ops { * The following routines are called from below. */ void (*receive_buf)(struct tty_struct *, const unsigned char *cp, - char *fp, int count); + const char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, - char *fp, int count); + const char *fp, int count); struct module *owner; - - int refcount; }; struct tty_ldisc { diff --git a/include/linux/uio.h b/include/linux/uio.h index d3ec87706d..82c3c3e819 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -19,21 +19,17 @@ struct kvec { enum iter_type { /* iter types */ - ITER_IOVEC = 4, - ITER_KVEC = 8, - ITER_BVEC = 16, - ITER_PIPE = 32, - ITER_DISCARD = 64, - ITER_XARRAY = 128, + ITER_IOVEC, + ITER_KVEC, + ITER_BVEC, + ITER_PIPE, + ITER_XARRAY, + ITER_DISCARD, }; struct iov_iter { - /* - * Bit 0 is the read/write bit, set if we're writing. - * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and - * the caller isn't expecting to drop a page reference when done. 
- */ - unsigned int type; + u8 iter_type; + bool data_source; size_t iov_offset; size_t count; union { @@ -55,7 +51,7 @@ struct iov_iter { static inline enum iter_type iov_iter_type(const struct iov_iter *i) { - return i->type & ~(READ | WRITE); + return i->iter_type; } static inline bool iter_is_iovec(const struct iov_iter *i) @@ -90,7 +86,7 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i) static inline unsigned char iov_iter_rw(const struct iov_iter *i) { - return i->type & (READ | WRITE); + return i->data_source ? WRITE : READ; } /* @@ -119,11 +115,11 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) }; } -size_t iov_iter_copy_from_user_atomic(struct page *page, - struct iov_iter *i, unsigned long offset, size_t bytes); +size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, + size_t bytes, struct iov_iter *i); void iov_iter_advance(struct iov_iter *i, size_t bytes); void iov_iter_revert(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); +int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); @@ -132,9 +128,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); -bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); -bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); static __always_inline __must_check size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) @@ -157,10 +151,11 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return false; - else - return _copy_from_iter_full(addr, bytes, i); + size_t copied = copy_from_iter(addr, bytes, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; } static __always_inline __must_check @@ -175,10 +170,11 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return false; - else - return _copy_from_iter_full_nocache(addr, bytes, i); + size_t copied = copy_from_iter_nocache(addr, bytes, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; } #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE @@ -278,7 +274,17 @@ struct csum_state { size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); -bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); + +static __always_inline __must_check +bool csum_and_copy_from_iter_full(void *addr, size_t bytes, + __wsum *csum, struct iov_iter *i) +{ + size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; +} size_t 
hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);
@@ -294,8 +300,4 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
 int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
 
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
-			    int (*f)(struct kvec *vec, void *context),
-			    void *context);
-
 #endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 454a5604f9..77d9a69534 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -473,12 +473,6 @@ struct usb_dev_state;
 
 struct usb_tt;
 
-enum usb_device_removable {
-	USB_DEVICE_REMOVABLE_UNKNOWN = 0,
-	USB_DEVICE_REMOVABLE,
-	USB_DEVICE_FIXED,
-};
-
 enum usb_port_connect_type {
 	USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
 	USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -703,7 +697,6 @@ struct usb_device {
 #endif
 	struct wusb_dev *wusb_dev;
 	int slot_id;
-	enum usb_device_removable removable;
 	struct usb2_lpm_parameters l1_params;
 	struct usb3_lpm_parameters u1_params;
 	struct usb3_lpm_parameters u2_params;
@@ -1485,7 +1478,7 @@ typedef void (*usb_complete_t)(struct urb *);
 *
 * Note that transfer_buffer must still be set if the controller
 * does not support DMA (as indicated by hcd_uses_dma()) and when talking
- * to root hub. If you have to trasfer between highmem zone and the device
+ * to root hub. If you have to transfer between highmem zone and the device
 * on such controller, create a bounce buffer or bail out with an error.
 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
 * capable, assign NULL to it, so that usbmon knows not to use the value.
diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h
index 9b895f93d8..9f5a51f79b 100644
--- a/include/linux/usb/cdc-wdm.h
+++ b/include/linux/usb/cdc-wdm.h
@@ -12,11 +12,12 @@
 #ifndef __LINUX_USB_CDC_WDM_H
 #define __LINUX_USB_CDC_WDM_H
 
+#include <linux/wwan.h>
 #include <uapi/linux/usb/cdc-wdm.h>
 
 extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
					struct usb_endpoint_descriptor *ep,
-					int bufsize,
+					int bufsize, enum wwan_port_type type,
					int (*manage_power)(struct usb_interface *, int));
 
 #endif /* __LINUX_USB_CDC_WDM_H */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c71150f2c6..9d27622792 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -271,7 +271,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
 * @bConfigurationValue: Copied into configuration descriptor.
 * @iConfiguration: Copied into configuration descriptor.
 * @bmAttributes: Copied into configuration descriptor.
- * @MaxPower: Power consumtion in mA. Used to compute bMaxPower in the
+ * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the
 *	configuration descriptor after considering the bus speed.
 * @cdev: assigned by @usb_add_config() before calling @bind(); this is
 *	the device associated with this configuration.
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index ee04ef214c..75c7538e35 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -197,7 +197,7 @@ struct usb_ep_caps {
 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
 * @ops: Function pointers used to access hardware-specific operations.
 * @ep_list:the gadget's ep_list holds all of its endpoints
- * @caps:The structure describing types and directions supported by endoint.
+ * @caps:The structure describing types and directions supported by endpoint.
* @enabled: The current endpoint enabled/disabled state. * @claimed: True if this endpoint is claimed by a function. * @maxpacket:The maximum packet size used on this endpoint. The initial @@ -325,6 +325,7 @@ struct usb_gadget_ops { void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); void (*udc_set_ssp_rate)(struct usb_gadget *gadget, enum usb_ssp_rate rate); + void (*udc_async_callbacks)(struct usb_gadget *gadget, bool enable); struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index a3710dceb2..d397194881 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -59,7 +59,7 @@ * USB Host Controller Driver (usb_hcd) framework * * Since "struct usb_bus" is so thin, you can't share much code in it. - * This framework is a layer over that, and should be more sharable. + * This framework is a layer over that, and should be more shareable. */ /*-------------------------------------------------------------------------*/ @@ -299,7 +299,7 @@ struct hc_driver { * (optional) these hooks allow an HCD to override the default DMA * mapping and unmapping routines. In general, they shouldn't be * necessary unless the host controller has special DMA requirements, - * such as alignment contraints. If these are not specified, the + * such as alignment constraints. If these are not specified, the * general usb_hcd_(un)?map_urb_for_dma functions will be used instead * (and it may be a good idea to call these functions in your HCD * implementation) @@ -414,7 +414,10 @@ struct hc_driver { int (*find_raw_port_number)(struct usb_hcd *, int); /* Call for power on/off the port if necessary */ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); - + /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */ +#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 + int (*submit_single_step_set_feature)(struct usb_hcd *, + struct urb *, int); }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) @@ -481,6 +484,14 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); +#ifdef CONFIG_USB_HCD_TEST_MODE +extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port); +#else +static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) +{ + return 0; +} +#endif /* CONFIG_USB_HCD_TEST_MODE */ #ifdef CONFIG_USB_PCI struct pci_dev; diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index e78eb577d0..3aee78dda1 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -98,7 +98,7 @@ enum otg_fsm_timer { * @b_bus_req: TRUE during the time that the Application running on the * B-device wants to use the bus * - * Auxilary inputs (OTG v1.3 only. Obsolete now.) + * Auxiliary inputs (OTG v1.3 only. Obsolete now.) 
* @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD * @b_bus_suspend: TRUE when the A-device detects that the B-device has put * the bus into suspend @@ -153,7 +153,7 @@ struct otg_fsm { int a_bus_req; int b_bus_req; - /* Auxilary inputs */ + /* Auxiliary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; @@ -177,7 +177,7 @@ struct otg_fsm { int a_bus_req_inf; int a_clr_err_inf; int b_bus_req_inf; - /* Auxilary informative variables */ + /* Auxiliary informative variables */ int a_suspend_req_inf; /* Timeout indicator for timers */ diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 69f1b63285..7ceeecbb9e 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -125,7 +125,7 @@ enum usb_dr_mode { * @dev: Pointer to the given device * * The function gets phy interface string from property 'dr_mode', - * and returns the correspondig enum usb_dr_mode + * and returns the corresponding enum usb_dr_mode */ extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 5e4c497f54..eeb7c2157c 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -32,7 +32,7 @@ #define USB_QUIRK_DELAY_INIT BIT(6) /* - * For high speed and super speed interupt endpoints, the USB 2.0 and + * For high speed and super speed interrupt endpoints, the USB 2.0 and * USB 3.0 spec require the interval in microframes * (1 microframe = 125 microseconds) to be calculated as * interval = 2 ^ (bInterval-1). diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index 0164fed31b..031f148ab3 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -65,6 +65,7 @@ void usb_role_switch_unregister(struct usb_role_switch *sw); void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data); void *usb_role_switch_get_drvdata(struct usb_role_switch *sw); +const char *usb_role_string(enum usb_role role); #else static inline int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) @@ -109,6 +110,11 @@ static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw) return NULL; } +static inline const char *usb_role_string(enum usb_role role) +{ + return "unknown"; +} + #endif #endif /* __LINUX_USB_ROLE_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 8c63fa9bfc..16ea5a4cc5 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -276,7 +276,7 @@ struct usb_serial_driver { int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); /* Called only by the tty layer */ - int (*write_room)(struct tty_struct *tty); + unsigned int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); @@ -284,7 +284,7 @@ struct usb_serial_driver { void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); void (*break_ctl)(struct tty_struct *tty, int break_state); - int (*chars_in_buffer)(struct tty_struct *tty); + unsigned int (*chars_in_buffer)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, long timeout); bool (*tx_empty)(struct usb_serial_port *port); void (*throttle)(struct tty_struct *tty); @@ -347,8 +347,8 @@ int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *por const unsigned char *buf, int count); void 
usb_serial_generic_close(struct usb_serial_port *port); int usb_serial_generic_resume(struct usb_serial *serial); -int usb_serial_generic_write_room(struct tty_struct *tty); -int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); +unsigned int usb_serial_generic_write_room(struct tty_struct *tty); +unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); void usb_serial_generic_read_bulk_callback(struct urb *urb); void usb_serial_generic_write_bulk_callback(struct urb *urb); @@ -395,7 +395,7 @@ static inline void usb_serial_debug_data(struct device *dev, } /* - * Macro for reporting errors in write path to avoid inifinite loop + * Macro for reporting errors in write path to avoid infinite loop * when port is used as a console. */ #define dev_err_console(usport, fmt, ...) \ diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 42fcfbe105..bffc8d3e14 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -66,6 +66,8 @@ enum tcpm_transmit_type { * For example, some tcpcs may include BC1.2 charger detection * and use that in this case. * @set_cc: Called to set value of CC pins + * @apply_rc: Optional; Needed to move TCPCI based chipset to APPLY_RC state + * as stated by the TCPCI specification. * @get_cc: Called to read current CC pin values * @set_polarity: * Called to set polarity @@ -120,6 +122,8 @@ struct tcpc_dev { int (*get_vbus)(struct tcpc_dev *dev); int (*get_current_limit)(struct tcpc_dev *dev); int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc); + int (*apply_rc)(struct tcpc_dev *dev, enum typec_cc_status cc, + enum typec_cc_polarity polarity); int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1, enum typec_cc_status *cc2); int (*set_polarity)(struct tcpc_dev *dev, diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index fc4c7edb2e..cfb916cccd 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h @@ -97,7 +97,7 @@ enum { #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) -/* Helper for setting/getting the pin assignement value to the configuration */ +/* Helper for setting/getting the pin assignment value to the configuration */ #define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8) #define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8) diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 1d08dbbcfe..eb70cabe6e 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -54,9 +54,15 @@ enum ucount_type { UCOUNT_FANOTIFY_GROUPS, UCOUNT_FANOTIFY_MARKS, #endif + UCOUNT_RLIMIT_NPROC, + UCOUNT_RLIMIT_MSGQUEUE, + UCOUNT_RLIMIT_SIGPENDING, + UCOUNT_RLIMIT_MEMLOCK, UCOUNT_COUNTS, }; +#define MAX_PER_NAMESPACE_UCOUNTS UCOUNT_RLIMIT_NPROC + struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; @@ -92,23 +98,42 @@ struct user_namespace { struct ctl_table_header *sysctls; #endif struct ucounts *ucounts; - int ucount_max[UCOUNT_COUNTS]; + long ucount_max[UCOUNT_COUNTS]; } __randomize_layout; struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; - int count; - atomic_t ucount[UCOUNT_COUNTS]; + atomic_t count; + atomic_long_t ucount[UCOUNT_COUNTS]; }; extern struct user_namespace init_user_ns; +extern struct ucounts init_ucounts; bool setup_userns_sysctls(struct user_namespace *ns); void retire_userns_sysctls(struct user_namespace *ns); 
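+/*
+ * A sketch of how the rlimit-style counters below are intended to be used
+ * (the surrounding code is illustrative): charge one unit, compare against
+ * the per-namespace maximum, and undo the charge on failure:
+ *
+ *	if (inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_NPROC, 1) >
+ *	    ns->ucount_max[UCOUNT_RLIMIT_NPROC]) {
+ *		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_NPROC, 1);
+ *		return -EAGAIN;
+ *	}
+ */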
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type); void dec_ucount(struct ucounts *ucounts, enum ucount_type type); +struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid); +struct ucounts * __must_check get_ucounts(struct ucounts *ucounts); +void put_ucounts(struct ucounts *ucounts); + +static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type type) +{ + return atomic_long_read(&ucounts->ucount[type]); +} + +long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v); +bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v); +bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max); + +static inline void set_rlimit_ucount_max(struct user_namespace *ns, + enum ucount_type type, unsigned long max) +{ + ns->ucount_max[type] = max <= LONG_MAX ? max : LONG_MAX; +} #ifdef CONFIG_USER_NS diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 794d1538b8..331d2ccf0b 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -53,6 +53,11 @@ enum mcopy_atomic_mode { MCOPY_ATOMIC_CONTINUE, }; +extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, struct page *page, + bool newly_allocated, bool wp_copy); + extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode); diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index f311d227aa..3357ac9887 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -28,13 +28,34 @@ struct vdpa_notification_area { }; /** - * struct vdpa_vq_state - vDPA vq_state definition + * struct vdpa_vq_state_split - vDPA split virtqueue state * @avail_index: available index */ -struct vdpa_vq_state { +struct vdpa_vq_state_split { u16 avail_index; }; +/** + * struct vdpa_vq_state_packed - vDPA packed virtqueue state + * @last_avail_counter: last driver ring wrap counter observed by device + * @last_avail_idx: device available index + * @last_used_counter: device ring wrap counter + * @last_used_idx: used index + */ +struct vdpa_vq_state_packed { + u16 last_avail_counter:1; + u16 last_avail_idx:15; + u16 last_used_counter:1; + u16 last_used_idx:15; +}; + +struct vdpa_vq_state { + union { + struct vdpa_vq_state_split split; + struct vdpa_vq_state_packed packed; + }; +}; + struct vdpa_mgmt_dev; /** diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index 6a95b58fd0..eb2bd9b407 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -79,6 +79,7 @@ static inline void vp_iowrite64_twopart(u64 val, } u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); +u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev); void vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features); u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index dc636b7271..35d7eedb5e 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -36,6 +36,7 @@ struct virtio_vsock_sock { u32 rx_bytes; u32 buf_alloc; struct list_head rx_queue; + u32 msg_count; }; struct virtio_vsock_pkt { @@ -80,8 +81,17 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags); +int 
+virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk, + struct msghdr *msg, + size_t len); +ssize_t +virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk, + struct msghdr *msg, + int flags); s64 virtio_transport_stream_has_data(struct vsock_sock *vsk); s64 virtio_transport_stream_has_space(struct vsock_sock *vsk); +u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk); int virtio_transport_do_socket_init(struct vsock_sock *vsk, struct vsock_sock *psk); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index bfaaf0b6fa..2644425b6d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -29,7 +29,7 @@ struct notifier_block; /* in notifier.h */ #define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */ /* - * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. + * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC. * * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after * shadow memory has been mapped. It's used to handle allocation errors so that @@ -104,6 +104,21 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot) } #endif +#ifndef arch_vmap_pte_range_map_size +static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end, + u64 pfn, unsigned int max_page_shift) +{ + return PAGE_SIZE; +} +#endif + +#ifndef arch_vmap_pte_supported_shift +static inline int arch_vmap_pte_supported_shift(unsigned long size) +{ + return PAGE_SHIFT; +} +#endif + /* * Highlevel APIs for driver use */ @@ -232,7 +247,7 @@ static inline void set_vm_flush_reset_perms(void *addr) extern long vread(char *buf, char *addr, unsigned long count); /* - * Internals. Dont't use.. + * Internals. Don't use.. */ extern struct list_head vmap_area_list; extern __init void vm_area_add_early(struct vm_struct *vm); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 3299cd69e4..d6a6cf53b1 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -138,34 +138,27 @@ static inline void vm_events_fold_cpu(int cpu) * Zone and node-based page accounting with per cpu differentials. 
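The two #ifndef'd vmalloc hooks above are generic fallbacks that an architecture may override; a hedged sketch of what an override could look like for a hypothetical port with 64 KiB contiguous-PTE mappings (the geometry is an assumption, not taken from any real architecture):

/* Hypothetical arch override; defining the macro suppresses the generic
 * fallback above. Assumes the CPU can map 64KiB with one cont-PTE. */
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
                unsigned long end, u64 pfn, unsigned int max_page_shift)
{
        if (max_page_shift >= 16 && end - addr >= SZ_64K &&
            !(addr & (SZ_64K - 1)) &&
            !(pfn & ((SZ_64K >> PAGE_SHIFT) - 1)))
                return SZ_64K;  /* map a whole cont-PTE block at once */

        return PAGE_SIZE;
}

#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
        return size >= SZ_64K ? 16 : PAGE_SHIFT;  /* 16 == ilog2(SZ_64K) */
}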
*/ extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; -extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; +extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; #ifdef CONFIG_NUMA -static inline void zone_numa_state_add(long x, struct zone *zone, - enum numa_stat_item item) +static inline void zone_numa_event_add(long x, struct zone *zone, + enum numa_stat_item item) { - atomic_long_add(x, &zone->vm_numa_stat[item]); - atomic_long_add(x, &vm_numa_stat[item]); + atomic_long_add(x, &zone->vm_numa_event[item]); + atomic_long_add(x, &vm_numa_event[item]); } -static inline unsigned long global_numa_state(enum numa_stat_item item) -{ - long x = atomic_long_read(&vm_numa_stat[item]); - - return x; -} - -static inline unsigned long zone_numa_state_snapshot(struct zone *zone, +static inline unsigned long zone_numa_event_state(struct zone *zone, enum numa_stat_item item) { - long x = atomic_long_read(&zone->vm_numa_stat[item]); - int cpu; + return atomic_long_read(&zone->vm_numa_event[item]); +} - for_each_online_cpu(cpu) - x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; - - return x; +static inline unsigned long +global_numa_event_state(enum numa_stat_item item) +{ + return atomic_long_read(&vm_numa_event[item]); } #endif /* CONFIG_NUMA */ @@ -236,7 +229,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, #ifdef CONFIG_SMP int cpu; for_each_online_cpu(cpu) - x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; + x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item]; if (x < 0) x = 0; @@ -245,18 +238,38 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, } #ifdef CONFIG_NUMA -extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); +/* See __count_vm_event comment on why raw_cpu_inc is used. 
*/ +static inline void +__count_numa_event(struct zone *zone, enum numa_stat_item item) +{ + struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; + + raw_cpu_inc(pzstats->vm_numa_event[item]); +} + +static inline void +__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta) +{ + struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; + + raw_cpu_add(pzstats->vm_numa_event[item], delta); +} + extern unsigned long sum_zone_node_page_state(int node, enum zone_stat_item item); -extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); +extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); extern unsigned long node_page_state_pages(struct pglist_data *pgdat, enum node_stat_item item); +extern void fold_vm_numa_events(void); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) #define node_page_state_pages(node, item) global_node_page_state_pages(item) +static inline void fold_vm_numa_events(void) +{ +} #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP @@ -291,7 +304,7 @@ struct ctl_table; int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, loff_t *ppos); -void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); +void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *); int calculate_pressure_threshold(struct zone *zone); int calculate_normal_threshold(struct zone *zone); @@ -399,7 +412,7 @@ static inline void cpu_vm_stats_fold(int cpu) { } static inline void quiet_vmstat(void) { } static inline void drain_zonestat(struct zone *zone, - struct per_cpu_pageset *pset) { } + struct per_cpu_zonestat *pzstats) { } #endif /* CONFIG_SMP */ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, @@ -428,7 +441,7 @@ static inline const char *numa_stat_name(enum numa_stat_item item) static inline const char *node_stat_name(enum node_stat_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + - NR_VM_NUMA_STAT_ITEMS + + NR_VM_NUMA_EVENT_ITEMS + item]; } @@ -440,7 +453,7 @@ static inline const char *lru_list_name(enum lru_list lru) static inline const char *writeback_stat_name(enum writeback_stat_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + - NR_VM_NUMA_STAT_ITEMS + + NR_VM_NUMA_EVENT_ITEMS + NR_VM_NODE_STAT_ITEMS + item]; } @@ -449,7 +462,7 @@ static inline const char *writeback_stat_name(enum writeback_stat_item item) static inline const char *vm_event_name(enum vm_event_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + - NR_VM_NUMA_STAT_ITEMS + + NR_VM_NUMA_EVENT_ITEMS + NR_VM_NODE_STAT_ITEMS + NR_VM_WRITEBACK_STAT_ITEMS + item]; diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 94e7a31547..0da94a6dee 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -166,7 +166,6 @@ int vt_get_kbd_mode_bit(int console, int bit); void vt_set_kbd_mode_bit(int console, int bit); void vt_clr_kbd_mode_bit(int console, int bit); void vt_set_led_state(int console, int leds); -void vt_set_led_state(int console, int leds); void vt_kbd_con_start(int console); void vt_kbd_con_stop(int console); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 8e5c5bb16e..667e86cfbd 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -221,6 +221,7 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, 
struct page *page, int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages, enum wb_reason reason, struct wb_completion *done); void cgroup_writeback_umount(void); +bool cleanup_offline_cgwb(struct bdi_writeback *wb); /** * inode_attach_wb - associate an inode with its wb @@ -360,7 +361,6 @@ extern unsigned int dirty_writeback_interval; extern unsigned int dirty_expire_interval; extern unsigned int dirtytime_expire_interval; extern int vm_highmem_is_dirtyable; -extern int block_dump; extern int laptop_mode; int dirty_background_ratio_handler(struct ctl_table *table, int write, diff --git a/include/linux/wwan.h b/include/linux/wwan.h index aa05a253dc..9fac819f92 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -6,7 +6,10 @@ #include #include +#include #include +#include +#include /** * enum wwan_port_type - WWAN port types @@ -15,7 +18,10 @@ * @WWAN_PORT_QMI: Qcom modem/MSM interface for modem control * @WWAN_PORT_QCDM: Qcom Modem diagnostic interface * @WWAN_PORT_FIREHOSE: XML based command protocol - * @WWAN_PORT_MAX: Number of supported port types + * + * @WWAN_PORT_MAX: Highest supported port types + * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type + * @__WWAN_PORT_MAX: Internal use */ enum wwan_port_type { WWAN_PORT_AT, @@ -23,7 +29,12 @@ enum wwan_port_type { WWAN_PORT_QMI, WWAN_PORT_QCDM, WWAN_PORT_FIREHOSE, - WWAN_PORT_MAX, + + /* Add new port types above this line */ + + __WWAN_PORT_MAX, + WWAN_PORT_MAX = __WWAN_PORT_MAX - 1, + WWAN_PORT_UNKNOWN, }; struct wwan_port; @@ -31,15 +42,23 @@ struct wwan_port; /** struct wwan_port_ops - The WWAN port operations * @start: The routine for starting the WWAN port device. * @stop: The routine for stopping the WWAN port device. - * @tx: The routine that sends WWAN port protocol data to the device. + * @tx: Non-blocking routine that sends WWAN port protocol data to the device. + * @tx_blocking: Optional blocking routine that sends WWAN port protocol data + * to the device. + * @tx_poll: Optional routine that sets additional TX poll flags. * * The wwan_port_ops structure contains a list of low-level operations - * that control a WWAN port device. All functions are mandatory. + * that control a WWAN port device. All functions are mandatory unless specified. */ struct wwan_port_ops { int (*start)(struct wwan_port *port); void (*stop)(struct wwan_port *port); int (*tx)(struct wwan_port *port, struct sk_buff *skb); + + /* Optional operations */ + int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb); + __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp, + poll_table *wait); }; /** @@ -108,4 +127,48 @@ void wwan_port_txon(struct wwan_port *port); */ void *wwan_port_get_drvdata(struct wwan_port *port); +/** + * struct wwan_netdev_priv - WWAN core network device private data + * @link_id: WWAN device data link id + * @drv_priv: driver private data area, size is determined in &wwan_ops + */ +struct wwan_netdev_priv { + u32 link_id; + + /* must be last */ + u8 drv_priv[] __aligned(sizeof(void *)); +}; + +static inline void *wwan_netdev_drvpriv(struct net_device *dev) +{ + return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv; +} + +/* + * Used to indicate that the WWAN core should not create a default network + * link. 
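Since the wwan.h hunk above now reserves the head of netdev_priv() for the core via struct wwan_netdev_priv, a driver must reach its own data through the new accessor. A hedged sketch, where every mymodem_* symbol is invented and only wwan_netdev_drvpriv() comes from the header:

/* Hypothetical driver use of the new accessor; going through
 * netdev_priv() directly would land on the core's link_id field. */
struct mymodem_link {
        struct sk_buff_head txq;
};

static netdev_tx_t mymodem_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mymodem_link *link = wwan_netdev_drvpriv(dev);

        skb_queue_tail(&link->txq, skb);
        return NETDEV_TX_OK;
}

The same hunk makes @tx explicitly non-blocking and adds the optional @tx_blocking and @tx_poll hooks, so a driver that can sleep on TX fills in tx_blocking and leaves the mandatory tx for the non-blocking path.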
+ */ +#define WWAN_NO_DEFAULT_LINK U32_MAX + +/** + * struct wwan_ops - WWAN device ops + * @priv_size: size of private netdev data area + * @setup: set up a new netdev + * @newlink: register the new netdev + * @dellink: remove the given netdev + */ +struct wwan_ops { + unsigned int priv_size; + void (*setup)(struct net_device *dev); + int (*newlink)(void *ctxt, struct net_device *dev, + u32 if_id, struct netlink_ext_ack *extack); + void (*dellink)(void *ctxt, struct net_device *dev, + struct list_head *head); +}; + +int wwan_register_ops(struct device *parent, const struct wwan_ops *ops, + void *ctxt, u32 def_link_id); + +void wwan_unregister_ops(struct device *parent); + #endif /* __WWAN_H */ diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index 143568d64b..4b57bbba58 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -338,7 +338,7 @@ do { \ FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \ break; \ } \ - /* FALLTHRU */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h index cd51fb6df1..7cbbbf055f 100644 --- a/include/media/hevc-ctrls.h +++ b/include/media/hevc-ctrls.h @@ -20,6 +20,7 @@ #define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_CODEC_BASE + 1009) #define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_BASE + 1010) #define V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_BASE + 1011) +#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_BASE + 1012) #define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_CODEC_BASE + 1015) #define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_CODEC_BASE + 1016) @@ -28,6 +29,7 @@ #define V4L2_CTRL_TYPE_HEVC_PPS 0x0121 #define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122 #define V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX 0x0123 +#define V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS 0x0124 enum v4l2_mpeg_video_hevc_decode_mode { V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, @@ -77,13 +79,12 @@ struct v4l2_ctrl_hevc_sps { __u8 num_short_term_ref_pic_sets; __u8 num_long_term_ref_pics_sps; __u8 chroma_format_idc; - - __u8 padding; + __u8 sps_max_sub_layers_minus1; __u64 flags; }; -#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0) +#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0) #define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1) #define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2) #define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3) @@ -102,10 +103,14 @@ struct v4l2_ctrl_hevc_sps { #define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16) #define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17) #define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18) +#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19) +#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20) struct v4l2_ctrl_hevc_pps { /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */ __u8 num_extra_slice_header_bits; + __u8 num_ref_idx_l0_default_active_minus1; + __u8 num_ref_idx_l1_default_active_minus1; __s8 init_qp_minus26; __u8 diff_cu_qp_delta_depth; __s8 pps_cb_qp_offset; @@ -197,27 +202,49 @@ struct v4l2_ctrl_hevc_slice_params { __u8 pic_struct; /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: General slice segment header */ - __u8 num_active_dpb_entries; __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u8 num_rps_poc_st_curr_before; - __u8 num_rps_poc_st_curr_after; - __u8 num_rps_poc_lt_curr; - - __u8 padding; + __u8 padding[5]; __u32 entry_point_offset_minus1[256]; - /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ - struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */ struct v4l2_hevc_pred_weight_table pred_weight_table; __u64 flags; }; +#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1 +#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2 +#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4 + +struct v4l2_ctrl_hevc_decode_params { + __s32 pic_order_cnt_val; + __u8 num_active_dpb_entries; + struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 num_poc_st_curr_before; + __u8 num_poc_st_curr_after; + __u8 num_poc_lt_curr; + __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u64 flags; +}; + +/* MPEG-class control IDs specific to the Hantro driver as defined by V4L2 */ +#define V4L2_CID_CODEC_HANTRO_BASE (V4L2_CTRL_CLASS_CODEC | 0x1200) +/* + * V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP - + * the number of data (in bits) to skip in the + * slice segment header. + * If non-IDR, the bits to be skipped go from syntax element "pic_output_flag" + * to before syntax element "slice_temporal_mvp_enabled_flag". + * If IDR, the skipped bits are just "pic_output_flag" + * (separate_colour_plane_flag is not supported). + */ +#define V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP (V4L2_CID_CODEC_HANTRO_BASE + 0) + struct v4l2_ctrl_hevc_scaling_matrix { __u8 scaling_list_4x4[6][16]; __u8 scaling_list_8x8[6][64]; diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h index b35ea60625..2ab54d426c 100644 --- a/include/media/media-dev-allocator.h +++ b/include/media/media-dev-allocator.h @@ -19,7 +19,7 @@ struct usb_device; -#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB) +#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB) /** * media_device_usb_allocate() - Allocate and return struct &media device * diff --git a/include/media/rc-map.h b/include/media/rc-map.h index b5585d14ff..793b54342d 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -231,6 +231,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_CEC "rc-cec" #define RC_MAP_CINERGY "rc-cinergy" #define RC_MAP_CINERGY_1400 "rc-cinergy-1400" +#define RC_MAP_CT_90405 "rc-ct-90405" #define RC_MAP_D680_DMB "rc-d680-dmb" #define RC_MAP_DELOCK_61959 "rc-delock-61959" #define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec" @@ -312,7 +313,6 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" #define RC_MAP_STREAMZAP "rc-streamzap" #define RC_MAP_SU3000 "rc-su3000" -#define RC_MAP_TANGO "rc-tango" #define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini" #define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max" #define RC_MAP_TBS_NEC "rc-tbs-nec" diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index a5953b8128..575b59fbac 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -17,7 +17,6 @@ * Include the stateless codec compound control definitions. * This will move to the public headers once this API is fully stable. 
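Because the DPB bookkeeping moved out of the per-slice control and into the new per-frame v4l2_ctrl_hevc_decode_params introduced a bit further up, userspace now passes it once per frame. A sketch with placeholder values (dpb[0] would describe the actual reference picture):

/* Userspace sketch; all field values are placeholders. */
struct v4l2_ctrl_hevc_decode_params dec_params = {
        .pic_order_cnt_val      = 2,
        .num_active_dpb_entries = 1,
        .num_poc_st_curr_before = 1,
        .poc_st_curr_before     = { 0 },        /* index into dpb[] */
};

struct v4l2_ext_control ctrl = {
        .id   = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS,
        .size = sizeof(dec_params),
        .ptr  = &dec_params,
};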
*/ -#include #include /* forward references */ @@ -40,8 +39,9 @@ struct video_device; * @p_u16: Pointer to a 16-bit unsigned value. * @p_u32: Pointer to a 32-bit unsigned value. * @p_char: Pointer to a string. - * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure. - * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure. + * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure. + * @p_mpeg2_picture: Pointer to a MPEG2 picture structure. + * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure. * @p_fwht_params: Pointer to a FWHT stateless parameters structure. * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. @@ -66,8 +66,9 @@ union v4l2_ctrl_ptr { u16 *p_u16; u32 *p_u32; char *p_char; - struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params; - struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization; + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; struct v4l2_ctrl_fwht_params *p_fwht_params; struct v4l2_ctrl_h264_sps *p_h264_sps; struct v4l2_ctrl_h264_pps *p_h264_pps; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index d0e9a5bdb0..95f8bfd632 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -162,6 +162,9 @@ struct v4l2_subdev_io_pin_config { * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with * a direction argument if needed. * + * @command: called by in-kernel drivers in order to call functions internal + * to subdev drivers driver that have a separate callback. + * * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core. * used to provide support for private ioctls used on the driver. * @@ -193,6 +196,7 @@ struct v4l2_subdev_core_ops { int (*load_fw)(struct v4l2_subdev *sd); int (*reset)(struct v4l2_subdev *sd, u32 val); int (*s_gpio)(struct v4l2_subdev *sd, u32 val); + long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); #ifdef CONFIG_COMPAT long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd, @@ -623,6 +627,19 @@ struct v4l2_subdev_pad_config { struct v4l2_rect try_compose; }; +/** + * struct v4l2_subdev_state - Used for storing subdev state information. + * + * @pads: &struct v4l2_subdev_pad_config array + * + * This structure only needs to be passed to the pad op if the 'which' field + * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For + * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL. 
+ */ +struct v4l2_subdev_state { + struct v4l2_subdev_pad_config *pads; +}; + /** * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations * @@ -687,27 +704,27 @@ struct v4l2_subdev_pad_config { */ struct v4l2_subdev_pad_ops { int (*init_cfg)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg); + struct v4l2_subdev_state *state); int (*enum_mbus_code)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code); int (*enum_frame_size)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_frame_size_enum *fse); int (*enum_frame_interval)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval_enum *fie); int (*get_fmt)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_format *format); int (*set_fmt)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_format *format); int (*get_selection)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel); int (*set_selection)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel); int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); @@ -918,14 +935,14 @@ struct v4l2_subdev { * struct v4l2_subdev_fh - Used for storing subdev information per file handle * * @vfh: pointer to &struct v4l2_fh - * @pad: pointer to &struct v4l2_subdev_pad_config + * @state: pointer to &struct v4l2_subdev_state * @owner: module pointer to the owner of this file handle */ struct v4l2_subdev_fh { struct v4l2_fh vfh; struct module *owner; #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) - struct v4l2_subdev_pad_config *pad; + struct v4l2_subdev_state *state; #endif }; @@ -945,17 +962,17 @@ struct v4l2_subdev_fh { * &struct v4l2_subdev_pad_config->try_fmt * * @sd: pointer to &struct v4l2_subdev - * @cfg: pointer to &struct v4l2_subdev_pad_config array. - * @pad: index of the pad in the @cfg array. + * @state: pointer to &struct v4l2_subdev_state + * @pad: index of the pad in the &struct v4l2_subdev_state->pads array */ static inline struct v4l2_mbus_framefmt * v4l2_subdev_get_try_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; - return &cfg[pad].try_fmt; + return &state->pads[pad].try_fmt; } /** @@ -963,17 +980,17 @@ v4l2_subdev_get_try_format(struct v4l2_subdev *sd, * &struct v4l2_subdev_pad_config->try_crop * * @sd: pointer to &struct v4l2_subdev - * @cfg: pointer to &struct v4l2_subdev_pad_config array. - * @pad: index of the pad in the @cfg array. + * @state: pointer to &struct v4l2_subdev_state. + * @pad: index of the pad in the &struct v4l2_subdev_state->pads array. 
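To make the cfg → state conversion above concrete, a hedged sketch of a pad op after the change; the mydev bookkeeping is invented, only the v4l2_subdev_state plumbing and v4l2_subdev_get_try_format() come from this header:

/* Illustrative driver-side pad op under the new signature. */
struct mydev {
        struct v4l2_subdev sd;
        struct v4l2_mbus_framefmt cur_fmt;
};

static int mydev_get_fmt(struct v4l2_subdev *sd,
                         struct v4l2_subdev_state *state,
                         struct v4l2_subdev_format *format)
{
        struct mydev *mydev = container_of(sd, struct mydev, sd);

        if (format->which == V4L2_SUBDEV_FORMAT_TRY)
                format->format = *v4l2_subdev_get_try_format(sd, state,
                                                             format->pad);
        else    /* ACTIVE: state may legitimately be NULL here */
                format->format = mydev->cur_fmt;

        return 0;
}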
*/ static inline struct v4l2_rect * v4l2_subdev_get_try_crop(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; - return &cfg[pad].try_crop; + return &state->pads[pad].try_crop; } /** @@ -981,17 +998,17 @@ v4l2_subdev_get_try_crop(struct v4l2_subdev *sd, * &struct v4l2_subdev_pad_config->try_compose * * @sd: pointer to &struct v4l2_subdev - * @cfg: pointer to &struct v4l2_subdev_pad_config array. - * @pad: index of the pad in the @cfg array. + * @state: pointer to &struct v4l2_subdev_state. + * @pad: index of the pad in the &struct v4l2_subdev_state->pads array. */ static inline struct v4l2_rect * v4l2_subdev_get_try_compose(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; - return &cfg[pad].try_compose; + return &state->pads[pad].try_compose; } #endif @@ -1093,20 +1110,21 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, int v4l2_subdev_link_validate(struct media_link *link); /** - * v4l2_subdev_alloc_pad_config - Allocates memory for pad config + * v4l2_subdev_alloc_state - allocate v4l2_subdev_state * - * @sd: pointer to struct v4l2_subdev + * @sd: pointer to &struct v4l2_subdev for which the state is being allocated. + * + * Must call v4l2_subdev_free_state() when state is no longer needed. */ -struct -v4l2_subdev_pad_config *v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd); +struct v4l2_subdev_state *v4l2_subdev_alloc_state(struct v4l2_subdev *sd); /** - * v4l2_subdev_free_pad_config - Frees memory allocated by - * v4l2_subdev_alloc_pad_config(). + * v4l2_subdev_free_state - free a v4l2_subdev_state * - * @cfg: pointer to &struct v4l2_subdev_pad_config + * @state: v4l2_subdev_state to be freed. */ -void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg); +void v4l2_subdev_free_state(struct v4l2_subdev_state *state); + #endif /* CONFIG_MEDIA_CONTROLLER */ /** diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index c203047eb8..b66585e304 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -261,6 +261,22 @@ int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name); */ void vb2_queue_release(struct vb2_queue *q); +/** + * vb2_queue_change_type() - change the type of an inactive vb2_queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*) + * + * This function changes the type of the vb2_queue. This is only possible + * if the queue is not busy (i.e. no buffers have been allocated). + * + * vb2_queue_change_type() can be used to support multiple buffer types using + * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and + * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type() + * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus + * "lock" the buffer type until the buffers have been released. + */ +int vb2_queue_change_type(struct vb2_queue *q, unsigned int type); + /** * vb2_poll() - implements poll userspace operation * @q: pointer to &struct vb2_queue with videobuf2 queue. 
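The vb2_queue_change_type() kerneldoc above describes the intended call site; roughly, a driver wrapping the stock ioctl helper might look like the sketch below, where how the driver finds its queue (video_drvdata() here) is an assumption:

/* Sketch of the pattern the kerneldoc describes. */
static int mydrv_reqbufs(struct file *file, void *priv,
                         struct v4l2_requestbuffers *rb)
{
        struct vb2_queue *q = video_drvdata(file);
        int ret;

        /* Fails with -EBUSY if buffers are already allocated. */
        ret = vb2_queue_change_type(q, rb->type);
        if (ret)
                return ret;

        return vb2_ioctl_reqbufs(file, priv, rb);
}

Once buffers exist, the type is effectively locked until they are released, which is exactly the "lock" behaviour the kerneldoc promises.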
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index 14cfd03626..e3e770f76f 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -19,7 +19,7 @@ enum rpcif_data_dir { RPCIF_DATA_OUT, }; -struct rpcif_op { +struct rpcif_op { struct { u8 buswidth; u8 opcode; @@ -57,7 +57,7 @@ struct rpcif_op { } data; }; -struct rpcif { +struct rpcif { struct device *dev; void __iomem *dirmap; struct regmap *regmap; @@ -76,7 +76,7 @@ struct rpcif { u32 ddr; /* DRDRENR or SMDRENR */ }; -int rpcif_sw_init(struct rpcif *rpc, struct device *dev); +int rpcif_sw_init(struct rpcif *rpc, struct device *dev); void rpcif_hw_init(struct rpcif *rpc, bool hyperflash); void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, size_t *len); diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index b1c7172869..ab207677e0 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -135,6 +135,14 @@ struct vsock_transport { bool (*stream_is_active)(struct vsock_sock *); bool (*stream_allow)(u32 cid, u32 port); + /* SEQ_PACKET. */ + ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg, + int flags); + int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg, + size_t len); + bool (*seqpacket_allow)(u32 remote_cid); + u32 (*seqpacket_has_data)(struct vsock_sock *vsk); + /* Notification. */ int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index ea4ae551c4..b80415011d 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -36,7 +36,7 @@ #define HCI_MAX_AMP_ASSOC_SIZE 672 -#define HCI_MAX_CSB_DATA_SIZE 252 +#define HCI_MAX_CPB_DATA_SIZE 252 /* HCI dev events */ #define HCI_DEV_REG 1 @@ -339,6 +339,7 @@ enum { #define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */ #define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */ #define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ +#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */ #define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */ #define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */ @@ -471,10 +472,10 @@ enum { #define LMP_EXTFEATURES 0x80 /* Extended LMP features */ -#define LMP_CSB_MASTER 0x01 -#define LMP_CSB_SLAVE 0x02 -#define LMP_SYNC_TRAIN 0x04 -#define LMP_SYNC_SCAN 0x08 +#define LMP_CPB_CENTRAL 0x01 +#define LMP_CPB_PERIPHERAL 0x02 +#define LMP_SYNC_TRAIN 0x04 +#define LMP_SYNC_SCAN 0x08 #define LMP_SC 0x01 #define LMP_PING 0x02 @@ -488,7 +489,7 @@ enum { /* LE features */ #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 -#define HCI_LE_SLAVE_FEATURES 0x08 +#define HCI_LE_PERIPHERAL_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_LL_PRIVACY 0x40 @@ -497,8 +498,8 @@ enum { #define HCI_LE_PHY_CODED 0x08 #define HCI_LE_EXT_ADV 0x10 #define HCI_LE_CHAN_SEL_ALG2 0x40 -#define HCI_LE_CIS_MASTER 0x10 -#define HCI_LE_CIS_SLAVE 0x20 +#define HCI_LE_CIS_CENTRAL 0x10 +#define HCI_LE_CIS_PERIPHERAL 0x20 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -876,17 +877,17 @@ struct hci_rp_logical_link_cancel { __u8 flow_spec_id; } __packed; -#define HCI_OP_SET_CSB 0x0441 -struct hci_cp_set_csb { +#define HCI_OP_SET_CPB 0x0441 +struct hci_cp_set_cpb { __u8 enable; __u8 lt_addr; __u8 lpo_allowed; 
__le16 packet_type; __le16 interval_min; __le16 interval_max; - __le16 csb_sv_tout; + __le16 cpb_sv_tout; } __packed; -struct hci_rp_set_csb { +struct hci_rp_set_cpb { __u8 status; __u8 lt_addr; __le16 interval; @@ -1183,14 +1184,14 @@ struct hci_rp_delete_reserved_lt_addr { __u8 lt_addr; } __packed; -#define HCI_OP_SET_CSB_DATA 0x0c76 -struct hci_cp_set_csb_data { +#define HCI_OP_SET_CPB_DATA 0x0c76 +struct hci_cp_set_cpb_data { __u8 lt_addr; __u8 fragment; __u8 data_length; - __u8 data[HCI_MAX_CSB_DATA_SIZE]; + __u8 data[HCI_MAX_CPB_DATA_SIZE]; } __packed; -struct hci_rp_set_csb_data { +struct hci_rp_set_cpb_data { __u8 status; __u8 lt_addr; } __packed; @@ -1504,7 +1505,7 @@ struct hci_cp_le_set_scan_enable { } __packed; #define HCI_LE_USE_PEER_ADDR 0x00 -#define HCI_LE_USE_WHITELIST 0x01 +#define HCI_LE_USE_ACCEPT_LIST 0x01 #define HCI_OP_LE_CREATE_CONN 0x200d struct hci_cp_le_create_conn { @@ -1524,22 +1525,22 @@ struct hci_cp_le_create_conn { #define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e -#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f -struct hci_rp_le_read_white_list_size { +#define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f +struct hci_rp_le_read_accept_list_size { __u8 status; __u8 size; } __packed; -#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010 +#define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010 -#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011 -struct hci_cp_le_add_to_white_list { +#define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011 +struct hci_cp_le_add_to_accept_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; -#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012 -struct hci_cp_le_del_from_white_list { +#define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012 +struct hci_cp_le_del_from_accept_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; @@ -1774,13 +1775,15 @@ struct hci_cp_ext_adv_set { __u8 max_events; } __packed; +#define HCI_MAX_EXT_AD_LENGTH 251 + #define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037 struct hci_cp_le_set_ext_adv_data { __u8 handle; __u8 operation; __u8 frag_pref; __u8 length; - __u8 data[HCI_MAX_AD_LENGTH]; + __u8 data[]; } __packed; #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038 @@ -1789,7 +1792,7 @@ struct hci_cp_le_set_ext_scan_rsp_data { __u8 operation; __u8 frag_pref; __u8 length; - __u8 data[HCI_MAX_AD_LENGTH]; + __u8 data[]; } __packed; #define LE_SET_ADV_DATA_OP_COMPLETE 0x03 @@ -1838,23 +1841,23 @@ struct hci_rp_le_read_iso_tx_sync { #define HCI_OP_LE_SET_CIG_PARAMS 0x2062 struct hci_cis_params { __u8 cis_id; - __le16 m_sdu; - __le16 s_sdu; - __u8 m_phy; - __u8 s_phy; - __u8 m_rtn; - __u8 s_rtn; + __le16 c_sdu; + __le16 p_pdu; + __u8 c_phy; + __u8 p_phy; + __u8 c_rtn; + __u8 p_rtn; } __packed; struct hci_cp_le_set_cig_params { __u8 cig_id; - __u8 m_interval[3]; - __u8 s_interval[3]; - __u8 sca; + __u8 c_interval[3]; + __u8 p_interval[3]; + __u8 wc_sca; __u8 packing; __u8 framing; - __le16 m_latency; - __le16 s_latency; + __le16 c_latency; + __le16 p_latency; __u8 num_cis; struct hci_cis_params cis[]; } __packed; @@ -2259,7 +2262,7 @@ struct hci_ev_sync_train_complete { __u8 status; } __packed; -#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54 +#define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54 #define HCI_EV_LE_CONN_COMPLETE 0x01 struct hci_ev_le_conn_complete { @@ -2417,17 +2420,17 @@ struct hci_evt_le_cis_established { __le16 handle; __u8 cig_sync_delay[3]; __u8 cis_sync_delay[3]; - __u8 m_latency[3]; - __u8 s_latency[3]; - __u8 m_phy; - __u8 s_phy; + __u8 c_latency[3]; + __u8 p_latency[3]; + __u8 c_phy; + __u8 p_phy; __u8 nse; - __u8 m_bn; - __u8 s_bn; - __u8 m_ft; - __u8 s_ft; - __le16 m_mtu; - 
__le16 s_mtu; + __u8 c_bn; + __u8 p_bn; + __u8 c_ft; + __u8 p_ft; + __le16 c_mtu; + __le16 p_mtu; __le16 interval; } __packed; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c73ac52af1..a53e94459e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -122,7 +122,7 @@ struct hci_conn_hash { unsigned int amp_num; unsigned int sco_num; unsigned int le_num; - unsigned int le_num_slave; + unsigned int le_num_peripheral; }; struct bdaddr_list { @@ -228,9 +228,9 @@ struct adv_info { __u16 remaining_time; __u16 duration; __u16 adv_data_len; - __u8 adv_data[HCI_MAX_AD_LENGTH]; + __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u16 scan_rsp_len; - __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; + __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __s8 tx_power; __u32 min_interval; __u32 max_interval; @@ -327,7 +327,7 @@ struct hci_dev { __u8 max_page; __u8 features[HCI_MAX_PAGES][8]; __u8 le_features[8]; - __u8 le_white_list_size; + __u8 le_accept_list_size; __u8 le_resolv_list_size; __u8 le_num_of_adv_sets; __u8 le_states[8]; @@ -470,6 +470,7 @@ struct hci_dev { struct delayed_work service_cache; struct delayed_work cmd_timer; + struct delayed_work ncmd_timer; struct work_struct rx_work; struct work_struct cmd_work; @@ -521,14 +522,14 @@ struct hci_dev { struct hci_conn_hash conn_hash; struct list_head mgmt_pending; - struct list_head blacklist; - struct list_head whitelist; + struct list_head reject_list; + struct list_head accept_list; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; struct list_head identity_resolving_keys; struct list_head remote_oob_data; - struct list_head le_white_list; + struct list_head le_accept_list; struct list_head le_resolv_list; struct list_head le_conn_params; struct list_head pend_le_conns; @@ -550,9 +551,9 @@ struct hci_dev { DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); __s8 adv_tx_power; - __u8 adv_data[HCI_MAX_AD_LENGTH]; + __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 adv_data_len; - __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; + __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __u8 scan_rsp_data_len; struct list_head adv_instances; @@ -893,7 +894,7 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) case LE_LINK: h->le_num++; if (c->role == HCI_ROLE_SLAVE) - h->le_num_slave++; + h->le_num_peripheral++; break; case SCO_LINK: case ESCO_LINK: @@ -919,7 +920,7 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) case LE_LINK: h->le_num--; if (c->role == HCI_ROLE_SLAVE) - h->le_num_slave--; + h->le_num_peripheral--; break; case SCO_LINK: case ESCO_LINK: @@ -1393,8 +1394,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) /* ----- Extended LMP capabilities ----- */ -#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) -#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE) +#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL) +#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL) #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) #define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) #define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) @@ -1768,7 +1769,7 @@ void __mgmt_power_off(struct hci_dev *hdev); void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); void 
mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, - u32 flags, u8 *name, u8 name_len); + u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index a7cffb0695..23a0524061 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -202,7 +202,7 @@ struct mgmt_cp_load_link_keys { struct mgmt_ltk_info { struct mgmt_addr_info addr; __u8 type; - __u8 master; + __u8 initiator; __u8 enc_size; __le16 ediv; __le64 rand; @@ -939,6 +939,7 @@ struct mgmt_ev_auth_failed { #define MGMT_DEV_FOUND_CONFIRM_NAME 0x01 #define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02 #define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04 +#define MGMT_DEV_FOUND_INITIATED_CONN 0x08 #define MGMT_EV_DEVICE_FOUND 0x0012 struct mgmt_ev_device_found { diff --git a/include/net/bonding.h b/include/net/bonding.h index 019e998d94..625d9c72de 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -201,6 +201,11 @@ struct bond_up_slave { */ #define BOND_LINK_NOCHANGE -1 +struct bond_ipsec { + struct list_head list; + struct xfrm_state *xs; +}; + /* * Here are the locking policies for the two bonding locks: * Get rcu_read_lock when reading or RTNL when writing slave list. @@ -232,7 +237,7 @@ struct bonding { char proc_file_name[IFNAMSIZ]; #endif /* CONFIG_PROC_FS */ struct list_head bond_list; - u32 rr_tx_counter; + u32 __percpu *rr_tx_counter; struct ad_bond_info ad_info; struct alb_bond_info alb_info; struct bond_params params; @@ -249,7 +254,9 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; #ifdef CONFIG_XFRM_OFFLOAD - struct xfrm_state *xs; + struct list_head ipsec_list; + /* protecting ipsec_list */ + spinlock_t ipsec_lock; #endif /* CONFIG_XFRM_OFFLOAD */ }; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 73af4a64a5..40296ed976 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void) static inline bool sk_can_busy_loop(const struct sock *sk) { - return sk->sk_ll_usec && !signal_pending(current); + return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current); } bool sk_busy_loop_end(void *p, unsigned long start_time); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 58c2cd417e..161cdf7df1 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7,7 +7,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include @@ -22,6 +22,7 @@ #include #include #include +#include #include /** @@ -370,11 +371,18 @@ struct ieee80211_sta_he_cap { * @he_cap: holds the HE capabilities * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a * 6 GHz band channel (and 0 may be valid value). 
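One small note on the sk_can_busy_loop() change above: READ_ONCE() only documents half of the data race, the writer must be annotated too. A sketch of the pairing; the helper name is invented, the real store happens in the SO_BUSY_POLL setsockopt path:

/* Illustrative writer pairing with the READ_ONCE() in sk_can_busy_loop();
 * sk_ll_usec is read locklessly, so the store must be WRITE_ONCE(). */
static void sk_set_ll_usec(struct sock *sk, unsigned int usec)
{
        WRITE_ONCE(sk->sk_ll_usec, usec);
}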
+ * @vendor_elems: vendor element(s) to advertise + * @vendor_elems.data: vendor element(s) data + * @vendor_elems.len: vendor element(s) length */ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct { + const u8 *data; + unsigned int len; + } vendor_elems; }; /** @@ -533,18 +541,6 @@ ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband, return NULL; } -/** - * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA - * @sband: the sband to search for the STA on - * - * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found - */ -static inline const struct ieee80211_sta_he_cap * -ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) -{ - return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION); -} - /** * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities * @sband: the sband to search for the STA on @@ -905,6 +901,17 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) return chandef->chan->max_power; } +/** + * cfg80211_any_usable_channels - check for usable channels + * @wiphy: the wiphy to check for + * @band_mask: which bands to check on + * @prohibited_flags: which channels to not consider usable, + * %IEEE80211_CHAN_DISABLED is always taken into account + */ +bool cfg80211_any_usable_channels(struct wiphy *wiphy, + unsigned long band_mask, + u32 prohibited_flags); + /** * enum survey_info_flags - survey information flags * @@ -1245,8 +1252,6 @@ struct cfg80211_csa_settings { u8 count; }; -#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 - /** * struct iface_combination_params - input parameters for interface combinations * @@ -3522,7 +3527,10 @@ struct cfg80211_pmsr_result { * If neither @trigger_based nor @non_trigger_based is set, * EDCA based ranging will be used. * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either - * @trigger_based or @non_trigger_based is set. + * @trigger_based or @non_trigger_based is set. + * @bss_color: the bss color of the responder. Optional. Set to zero to + * indicate the driver should set the BSS color. Only valid if + * @non_trigger_based or @trigger_based is set. * * See also nl80211 for the respective attribute documentation. */ @@ -3540,6 +3548,7 @@ struct cfg80211_pmsr_ftm_request_peer { u8 burst_duration; u8 ftms_per_burst; u8 ftmr_retries; + u8 bss_color; }; /** @@ -4945,6 +4954,7 @@ struct wiphy_iftype_akm_suites { * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes * @sar_capa: SAR control capabilities + * @rfkill: a pointer to the rfkill structure */ struct wiphy { struct mutex mtx; @@ -5087,6 +5097,8 @@ struct wiphy { const struct cfg80211_sar_capa *sar_capa; + struct rfkill *rfkill; + char priv[] __aligned(NETDEV_ALIGN); }; @@ -6661,7 +6673,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy); * wiphy_rfkill_stop_polling - stop polling rfkill * @wiphy: the wiphy */ -void wiphy_rfkill_stop_polling(struct wiphy *wiphy); +static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy) +{ + rfkill_pause_polling(wiphy->rfkill); +} /** * DOC: Vendor commands @@ -8154,6 +8169,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, dev_notice(&(wiphy)->dev, format, ##args) #define wiphy_info(wiphy, format, args...) \ dev_info(&(wiphy)->dev, format, ##args) +#define wiphy_info_once(wiphy, format, args...) 
\ + dev_info_once(&(wiphy)->dev, format, ##args) #define wiphy_err_ratelimited(wiphy, format, args...) \ dev_err_ratelimited(&(wiphy)->dev, format, ##args) diff --git a/include/net/checksum.h b/include/net/checksum.h index 0d05b9e869..5b96d5bd6e 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -80,16 +80,18 @@ static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) return csum16_add(csum, ~addend); } +static inline __wsum csum_shift(__wsum sum, int offset) +{ + /* rotate sum to align it with a 16b boundary */ + if (offset & 1) + return (__force __wsum)ror32((__force u32)sum, 8); + return sum; +} + static inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { - u32 sum = (__force u32)csum2; - - /* rotate sum to align it with a 16b boundary */ - if (offset & 1) - sum = ror32(sum, 8); - - return csum_add(csum, (__force __wsum)sum); + return csum_add(csum, csum_shift(csum2, offset)); } static inline __wsum diff --git a/include/net/devlink.h b/include/net/devlink.h index 7c984cadfe..57b738b780 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -34,6 +34,7 @@ struct devlink_ops; struct devlink { struct list_head list; struct list_head port_list; + struct list_head rate_list; struct list_head sb_list; struct list_head dpipe_table_list; struct list_head resource_list; @@ -133,6 +134,24 @@ struct devlink_port_attrs { }; }; +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + u64 tx_share; + u64 tx_max; + + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; +}; + struct devlink_port { struct list_head list; struct list_head param_list; @@ -152,6 +171,8 @@ struct devlink_port { struct delayed_work type_warn_dw; struct list_head reporter_list; struct mutex reporters_lock; /* Protects reporter_list */ + + struct devlink_rate *devlink_rate; }; struct devlink_port_new_attrs { @@ -1326,6 +1347,16 @@ struct devlink_ops { const struct devlink_trap_group *group, enum devlink_trap_action action, struct netlink_ext_ack *extack); + /** + * @trap_drop_counter_get: Trap drop counter get function. + * + * Should be used by device drivers to report number of packets + * that have been dropped, and cannot be passed to the devlink + * subsystem by the underlying device. + */ + int (*trap_drop_counter_get)(struct devlink *devlink, + const struct devlink_trap *trap, + u64 *p_drops); /** * @trap_policer_init: Trap policer initialization function. * @@ -1453,6 +1484,30 @@ struct devlink_ops { struct devlink_port *port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack); + + /** + * Rate control callbacks. 
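Stepping back to the checksum.h hunk above: it only factors the odd-offset byte rotation out of csum_block_add() into the new csum_shift() helper, with no behaviour change. A small usage sketch, where the fragment-walking context is invented:

/* Fold a fragment's checksum into a running sum; csum_block_add() now
 * delegates the odd-offset rotation to csum_shift(). */
static __wsum fold_fragment_csum(__wsum total, const void *frag,
                                 int len, int offset)
{
        __wsum partial = csum_partial(frag, len, 0);

        return csum_block_add(total, partial, offset);
}

Exposing csum_shift() separately lets callers that already have a partial sum apply the rotation without going through csum_block_add().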
+ */ + int (*rate_leaf_tx_share_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); + int (*rate_leaf_tx_max_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); + int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); + int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); + int (*rate_node_new)(struct devlink_rate *rate_node, void **priv, + struct netlink_ext_ack *extack); + int (*rate_node_del)(struct devlink_rate *rate_node, void *priv, + struct netlink_ext_ack *extack); + int (*rate_leaf_parent_set)(struct devlink_rate *child, + struct devlink_rate *parent, + void *priv_child, void *priv_parent, + struct netlink_ext_ack *extack); + int (*rate_node_parent_set)(struct devlink_rate *child, + struct devlink_rate *parent, + void *priv_child, void *priv_parent, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) @@ -1512,6 +1567,9 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u32 sf, bool external); +int devlink_rate_leaf_create(struct devlink_port *port, void *priv); +void devlink_rate_leaf_destroy(struct devlink_port *devlink_port); +void devlink_rate_nodes_destroy(struct devlink *devlink); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/net/dsa.h b/include/net/dsa.h index e1a2610a0e..33f40c1ec3 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -50,6 +50,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20 #define DSA_TAG_PROTO_SEVILLE_VALUE 21 #define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 +#define DSA_TAG_PROTO_SJA1110_VALUE 23 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -75,6 +76,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE, DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE, DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE, + DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE, }; struct packet_type; @@ -91,7 +93,8 @@ struct dsa_device_ops { * as regular on the master net device. */ bool (*filter)(const struct sk_buff *skb, struct net_device *dev); - unsigned int overhead; + unsigned int needed_headroom; + unsigned int needed_tailroom; const char *name; enum dsa_tag_protocol proto; /* Some tagging protocols either mangle or shift the destination MAC @@ -100,7 +103,6 @@ struct dsa_device_ops { * its RX filter. */ bool promisc_on_master; - bool tail_tag; }; /* This structure defines the control interfaces that are overlayed by the @@ -283,6 +285,12 @@ struct dsa_port { */ const struct dsa_netdevice_ops *netdev_ops; + /* List of MAC addresses that must be forwarded on this port. + * These are only valid on CPU ports and DSA links. 
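A hedged sketch of a driver hooking the devlink rate control callbacks added above and registering a rate leaf for one of its ports; the mydrv_* names and the firmware call are hypothetical, only the callback signature and devlink_rate_leaf_create() come from these headers:

/* Illustrative driver glue for the new rate API. */
struct mydrv_port {
        struct devlink_port dl_port;
        /* ... */
};

int mydrv_fw_set_port_tx_max(struct mydrv_port *port, u64 tx_max); /* hypothetical */

static int mydrv_rate_leaf_tx_max_set(struct devlink_rate *devlink_rate,
                                      void *priv, u64 tx_max,
                                      struct netlink_ext_ack *extack)
{
        struct mydrv_port *port = priv;

        return mydrv_fw_set_port_tx_max(port, tx_max);  /* device specific */
}

static int mydrv_port_rate_init(struct mydrv_port *port)
{
        /* Expose the port as a rate leaf so it can be shaped and grouped
         * under user-created rate nodes. */
        return devlink_rate_leaf_create(&port->dl_port, port);
}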
+ */ + struct list_head fdbs; + struct list_head mdbs; + bool setup; }; @@ -297,6 +305,13 @@ struct dsa_link { struct list_head list; }; +struct dsa_mac_addr { + unsigned char addr[ETH_ALEN]; + u16 vid; + refcount_t refcount; + struct list_head list; +}; + struct dsa_switch { bool setup; @@ -407,6 +422,21 @@ static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p) return NULL; } +static inline bool dsa_port_is_dsa(struct dsa_port *port) +{ + return port->type == DSA_PORT_TYPE_DSA; +} + +static inline bool dsa_port_is_cpu(struct dsa_port *port) +{ + return port->type == DSA_PORT_TYPE_CPU; +} + +static inline bool dsa_port_is_user(struct dsa_port *dp) +{ + return dp->type == DSA_PORT_TYPE_USER; +} + static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; @@ -474,6 +504,32 @@ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port) return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index); } +/* Return true if this is the local port used to reach the CPU port */ +static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port) +{ + if (dsa_is_unused_port(ds, port)) + return false; + + return port == dsa_upstream_port(ds, port); +} + +/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning + * that the routing port from @downstream_ds to @upstream_ds is also the port + * which @downstream_ds uses to reach its dedicated CPU. + */ +static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds, + struct dsa_switch *downstream_ds) +{ + int routing_port; + + if (upstream_ds == downstream_ds) + return true; + + routing_port = dsa_routing_port(downstream_ds, upstream_ds->index); + + return dsa_is_upstream_port(downstream_ds, routing_port); +} + static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) { const struct dsa_switch *ds = dp->ds; @@ -926,7 +982,7 @@ static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb, { #if IS_ENABLED(CONFIG_NET_DSA) const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops; - int tag_len = ops->overhead; + int tag_len = ops->needed_headroom; *offset = tag_len; *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1]; diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 56cb3c3856..14efa0ded7 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -45,7 +45,9 @@ skb_tunnel_info(const struct sk_buff *skb) return &md_dst->u.tun_info; dst = skb_dst(skb); - if (dst && dst->lwtstate) + if (dst && dst->lwtstate && + (dst->lwtstate->type == LWTUNNEL_ENCAP_IP || + dst->lwtstate->type == LWTUNNEL_ENCAP_IP6)) return lwt_tun_info(dst->lwtstate); return NULL; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index dc5c1e69cd..69c9eabf83 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -319,12 +319,14 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action, if (flow_offload_has_one_action(action)) return true; - flow_action_for_each(i, action_entry, action) { - if (i && action_entry->hw_stats != last_hw_stats) { - NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); - return false; + if (action) { + flow_action_for_each(i, action_entry, action) { + if (i && action_entry->hw_stats != last_hw_stats) { + NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); + return false; + } + last_hw_stats = action_entry->hw_stats; } - last_hw_stats = 
action_entry->hw_stats; } return true; } diff --git a/include/net/icmp.h b/include/net/icmp.h index fd84adc479..caddf4a59a 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -57,5 +57,6 @@ int icmp_rcv(struct sk_buff *skb); int icmp_err(struct sk_buff *skb, u32 info); int icmp_init(void); void icmp_out_count(struct net *net, unsigned char type); +bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr); #endif /* _ICMP_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 3c8c59471b..b06c2d02ec 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -135,7 +135,7 @@ struct inet_connection_sock { u32 icsk_user_timeout; u64 icsk_ca_priv[104 / sizeof(u64)]; -#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64)) +#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ diff --git a/include/net/ip.h b/include/net/ip.h index e20874059f..d9683bef86 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -31,6 +31,7 @@ #include #include #include +#include #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ #define IPV4_MIN_MTU 68 /* RFC 791 */ @@ -445,22 +446,25 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, /* 'forwarding = true' case should always honour route mtu */ mtu = dst_metric_raw(dst, RTAX_MTU); - if (mtu) - return mtu; + if (!mtu) + mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); - return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); + return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } static inline unsigned int ip_skb_dst_mtu(struct sock *sk, const struct sk_buff *skb) { + unsigned int mtu; + if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); } - return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); } struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f51a118bfc..625a38ccb5 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -263,13 +263,20 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); -static inline int ip6_skb_dst_mtu(struct sk_buff *skb) +static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) { + int mtu; + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? inet6_sk(skb->sk) : NULL; - return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? 
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); + if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) { + mtu = READ_ONCE(skb_dst(skb)->dev->mtu); + mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); + } else + mtu = dst_mtu(skb_dst(skb)); + + return mtu; } static inline bool ip6_sk_accept_pmtu(const struct sock *sk) @@ -317,7 +324,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) if (dst_metric_locked(dst, RTAX_MTU)) { mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) - return mtu; + goto out; } mtu = IPV6_MIN_MTU; @@ -327,7 +334,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) mtu = idev->cnf.mtu6; rcu_read_unlock(); - return mtu; +out: + return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } u32 ip6_mtu_from_fib6(const struct fib6_result *res, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a914f33f3e..3ab2563b1a 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -466,6 +466,49 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags); void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig); +/* Fields used for sysctl_fib_multipath_hash_fields. + * Common to IPv4 and IPv6. + * + * Add new fields at the end. This is user API. + */ +#define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0) +#define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1) +#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2) +#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3) +#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4) +#define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5) +#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6) +#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7) +#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8) +#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9) +#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10) +#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11) + +#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \ + (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \ + FIB_MULTIPATH_HASH_FIELD_DST_IP | \ + FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \ + FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \ + FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \ + FIB_MULTIPATH_HASH_FIELD_DST_PORT) + +#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \ + (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \ + FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \ + FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \ + FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \ + FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \ + FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) + +#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \ + (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \ + FIB_MULTIPATH_HASH_FIELD_INNER_MASK) + +#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \ + (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \ + FIB_MULTIPATH_HASH_FIELD_DST_IP | \ + FIB_MULTIPATH_HASH_FIELD_IP_PROTO) + #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, const struct sk_buff *skb, struct flow_keys *flkeys); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 448bf2b347..f2d0ecc257 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -926,11 +926,19 @@ static inline int ip6_multipath_hash_policy(const struct net *net) { return net->ipv6.sysctl.multipath_hash_policy; } +static inline u32 ip6_multipath_hash_fields(const struct net *net) +{ + return net->ipv6.sysctl.multipath_hash_fields; +} #else static inline int ip6_multipath_hash_policy(const struct net *net) { return 0; } +static inline u32 ip6_multipath_hash_fields(const 
struct net *net) +{ + return 0; +} #endif /* diff --git a/include/net/mac80211.h b/include/net/mac80211.h index e89530d0d9..d8a1d09a21 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7,7 +7,7 @@ * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation + * Copyright (C) 2018 - 2021 Intel Corporation */ #ifndef MAC80211_H @@ -526,6 +526,7 @@ struct ieee80211_fils_discovery { * @twt_responder: does this BSS support TWT requester (relevant for managed * mode only, set if the AP advertises TWT responder role) * @twt_protected: does this BSS support protected TWT frames + * @twt_broadcast: does this BSS support broadcast TWT * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not @@ -642,6 +643,7 @@ struct ieee80211_bss_conf { bool twt_requester; bool twt_responder; bool twt_protected; + bool twt_broadcast; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; @@ -3344,6 +3346,21 @@ enum ieee80211_reconfig_type { IEEE80211_RECONFIG_TYPE_SUSPEND, }; +/** + * struct ieee80211_prep_tx_info - prepare TX information + * @duration: if non-zero, hint about the required duration, + * only used with the mgd_prepare_tx() method. + * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc) + * @success: whether the frame exchange was successful, only + * used with the mgd_complete_tx() method, and then only + * valid for auth and (re)assoc. + */ +struct ieee80211_prep_tx_info { + u16 duration; + u16 subtype; + u8 success:1; +}; + /** * struct ieee80211_ops - callbacks from mac80211 to the driver * @@ -3756,9 +3773,13 @@ enum ieee80211_reconfig_type { * frame in case that no beacon was heard from the AP/P2P GO. * The callback will be called before each transmission and upon return * mac80211 will transmit the frame right away. - * If duration is greater than zero, mac80211 hints to the driver the - * duration for which the operation is requested. + * Additional information is passed in the &struct ieee80211_prep_tx_info + * data. If duration there is greater than zero, mac80211 hints to the + * driver the duration for which the operation is requested. * The callback is optional and can (should!) sleep. + * @mgd_complete_tx: Notify the driver that the response frame for a previously + * transmitted frame announced with @mgd_prepare_tx was received, the data + * is filled similarly to @mgd_prepare_tx though the duration is not used. * * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending * a TDLS discovery-request, we expect a reply to arrive on the AP's @@ -4109,7 +4130,10 @@ struct ieee80211_ops { void (*mgd_prepare_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 duration); + struct ieee80211_prep_tx_info *info); + void (*mgd_complete_tx)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); @@ -6184,6 +6208,11 @@ enum rate_control_capabilities { * otherwise the NSS difference doesn't bother us. 
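For drivers, the mgd_prepare_tx() change above is mostly mechanical: the bare u16 duration becomes info->duration, and the new mgd_complete_tx() closes the exchange. A hedged sketch of what a converted driver pair might look like (the drv_* names and session-protection helpers are hypothetical, not mac80211 API):

static void drv_mgd_prepare_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_prep_tx_info *info)
{
	/* was: void (*mgd_prepare_tx)(hw, vif, u16 duration) */
	if (info->duration)
		drv_start_session_protection(hw, vif, info->duration,
					     info->subtype);
}

static void drv_mgd_complete_tx(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_prep_tx_info *info)
{
	/* info->success is only meaningful for auth and (re)assoc */
	drv_end_session_protection(hw, vif);
}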
*/ RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0), + /** + * @RATE_CTRL_CAPA_AMPDU_TRIGGER: + * mac80211 should start A-MPDU sessions on tx + */ + RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1), }; struct rate_control_ops { @@ -6576,9 +6605,6 @@ static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) { } -void __ieee80211_schedule_txq(struct ieee80211_hw *hw, - struct ieee80211_txq *txq, bool force); - /** * ieee80211_schedule_txq - schedule a TXQ for transmission * @@ -6591,11 +6617,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw, * The driver may call this function if it has buffered packets for * this TXQ internally. */ -static inline void -ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) -{ - __ieee80211_schedule_txq(hw, txq, true); -} +void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); /** * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() @@ -6607,12 +6629,8 @@ ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) * The driver may set force=true if it has buffered packets for this TXQ * internally. */ -static inline void -ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, - bool force) -{ - __ieee80211_schedule_txq(hw, txq, force); -} +void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, + bool force); /** * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit @@ -6752,4 +6770,22 @@ struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw, struct sk_buff * ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + +/** + * ieee80211_is_tx_data - check if frame is a data frame + * + * The function is used to check if a frame is a data frame. Frames with + * hardware encapsulation enabled are data frames. + * + * @skb: the frame to be transmitted. 
+ */ +static inline bool ieee80211_is_tx_data(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *) skb->data; + + return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP || + ieee80211_is_data(hdr->frame_control); +} + #endif /* MAC80211_H */ diff --git a/include/net/macsec.h b/include/net/macsec.h index 52874cdfe2..d6fa6b97f6 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -241,7 +241,7 @@ struct macsec_context { struct macsec_rx_sc *rx_sc; struct { unsigned char assoc_num; - u8 key[MACSEC_KEYID_LEN]; + u8 key[MACSEC_MAX_KEY_LEN]; union { struct macsec_rx_sa *rx_sa; struct macsec_tx_sa *tx_sa; diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 83f23774b9..8b5af683a8 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -23,6 +23,7 @@ struct mptcp_ext { u64 data_seq; u32 subflow_seq; u16 data_len; + __sum16 csum; u8 use_map:1, dsn64:1, data_fin:1, @@ -31,7 +32,8 @@ struct mptcp_ext { mpc_map:1, frozen:1, reset_transient:1; - u8 reset_reason:4; + u8 reset_reason:4, + csum_reqd:1; }; #define MPTCP_RM_IDS_MAX 8 @@ -63,8 +65,10 @@ struct mptcp_out_options { struct mptcp_rm_list rm_list; u8 join_id; u8 backup; - u8 reset_reason:4; - u8 reset_transient:1; + u8 reset_reason:4, + reset_transient:1, + csum_reqd:1, + allow_join_id0:1; u32 nonce; u64 thmac; u32 token; @@ -101,7 +105,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts); -void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); +bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts); @@ -223,9 +227,10 @@ static inline bool mptcp_established_options(struct sock *sk, return false; } -static inline void mptcp_incoming_options(struct sock *sk, +static inline bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { + return true; } static inline void mptcp_skb_ext_move(struct sk_buff *to, diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index bdc0459a59..12cf6d7ea6 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -170,6 +171,9 @@ struct net { struct sock *crypto_nlsk; #endif struct sock *diag_nlsk; +#if IS_ENABLED(CONFIG_SMC) + struct netns_smc smc; +#endif } __randomize_layout; #include diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 06dc6db70d..cc663c68dd 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -346,6 +346,13 @@ nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) skb_set_nfct(skb, (unsigned long)ct | info); } +extern unsigned int nf_conntrack_net_id; + +static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net) +{ + return net_generic(net, nf_conntrack_net_id); +} + #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v)) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 09f2efea0b..13807ea94c 100644 --- 
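The switch of mptcp_incoming_options() from void to bool above lets TCP drop a segment whose MPTCP options fail validation (for example a bad DSS checksum, which the new csum fields support) instead of processing it; the stub keeps returning true so plain TCP behaviour is unchanged. Roughly, a call site changes like this (a sketch of the pattern, not the exact tcp_input.c hunk):

/* before: if (sk_is_mptcp(sk)) mptcp_incoming_options(sk, skb); */
if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
	__kfree_skb(skb);	/* subflow asked for this segment to be dropped */
	return;
}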
a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net); void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list); void nf_conntrack_proto_pernet_init(struct net *net); -void nf_conntrack_proto_pernet_fini(struct net *net); int nf_conntrack_proto_init(void); void nf_conntrack_proto_fini(void); diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 96f9cf81f4..1f47bef517 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -159,22 +159,26 @@ unsigned int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; #ifdef CONFIG_SYSCTL -__printf(3, 4) __cold +__printf(4, 5) __cold void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_conn *ct, + const struct nf_hook_state *state, const char *fmt, ...); -__printf(5, 6) __cold +__printf(4, 5) __cold void nf_l4proto_log_invalid(const struct sk_buff *skb, - struct net *net, - u16 pf, u8 protonum, + const struct nf_hook_state *state, + u8 protonum, const char *fmt, ...); #else -static inline __printf(5, 6) __cold -void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net, - u16 pf, u8 protonum, const char *fmt, ...) {} -static inline __printf(3, 4) __cold +static inline __printf(4, 5) __cold +void nf_l4proto_log_invalid(const struct sk_buff *skb, + const struct nf_hook_state *state, + u8 protonum, + const char *fmt, ...) {} +static inline __printf(4, 5) __cold void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_conn *ct, + const struct nf_hook_state *state, const char *fmt, ...) { } #endif /* CONFIG_SYSCTL */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 48ef7460ff..a3647fadf1 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -177,6 +177,8 @@ struct flow_offload { #define NF_FLOW_TIMEOUT (30 * HZ) #define nf_flowtable_time_stamp (u32)jiffies +unsigned long flow_offload_get_timeout(struct flow_offload *flow); + static inline __s32 nf_flow_timeout_delta(unsigned int timeout) { return (__s32)(timeout - nf_flowtable_time_stamp); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 0a5655e300..148f5d8ee5 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -23,35 +23,46 @@ struct module; struct nft_pktinfo { struct sk_buff *skb; + const struct nf_hook_state *state; bool tprot_set; u8 tprot; - /* for x_tables compatibility */ - struct xt_action_param xt; + u16 fragoff; + unsigned int thoff; }; +static inline struct sock *nft_sk(const struct nft_pktinfo *pkt) +{ + return pkt->state->sk; +} + +static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt) +{ + return pkt->thoff; +} + static inline struct net *nft_net(const struct nft_pktinfo *pkt) { - return pkt->xt.state->net; + return pkt->state->net; } static inline unsigned int nft_hook(const struct nft_pktinfo *pkt) { - return pkt->xt.state->hook; + return pkt->state->hook; } static inline u8 nft_pf(const struct nft_pktinfo *pkt) { - return pkt->xt.state->pf; + return pkt->state->pf; } static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt) { - return pkt->xt.state->in; + return pkt->state->in; } static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt) { - return 
pkt->xt.state->out; + return pkt->state->out; } static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, @@ -59,16 +70,15 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_state *state) { pkt->skb = skb; - pkt->xt.state = state; + pkt->state = state; } -static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt) { pkt->tprot_set = false; pkt->tprot = 0; - pkt->xt.thoff = 0; - pkt->xt.fragoff = 0; + pkt->thoff = 0; + pkt->fragoff = 0; } /** diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index fd10a7862f..0fa5a6d98a 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -3,6 +3,7 @@ #define _NET_NF_TABLES_CORE_H #include +#include extern struct nft_expr_type nft_imm_type; extern struct nft_expr_type nft_cmp_type; @@ -15,6 +16,7 @@ extern struct nft_expr_type nft_range_type; extern struct nft_expr_type nft_meta_type; extern struct nft_expr_type nft_rt_type; extern struct nft_expr_type nft_exthdr_type; +extern struct nft_expr_type nft_last_type; #ifdef CONFIG_NETWORK_SECMARK extern struct nft_object_type nft_secmark_obj_type; @@ -88,6 +90,36 @@ extern const struct nft_set_type nft_set_bitmap_type; extern const struct nft_set_type nft_set_pipapo_type; extern const struct nft_set_type nft_set_pipapo_avx2_type; +#ifdef CONFIG_RETPOLINE +bool nft_rhash_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_hash_lookup_fast(const struct net *net, + const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_hash_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_set_do_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +#else +static inline bool +nft_set_do_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext) +{ + return set->ops->lookup(net, set, key, ext); +} +#endif + +/* called from nft_pipapo_avx2.c */ +bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +/* called from nft_set_pipapo.c */ +bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); + struct nft_expr; struct nft_regs; struct nft_pktinfo; diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index 1f7bea39ad..eb4c094cd5 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -5,26 +5,24 @@ #include #include -static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt) { struct iphdr *ip; ip = ip_hdr(pkt->skb); pkt->tprot_set = true; pkt->tprot = ip->protocol; - pkt->xt.thoff = ip_hdrlen(pkt->skb); - pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; + pkt->thoff = ip_hdrlen(pkt->skb); + pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET; } -static inline int 
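With xt_action_param gone from nft_pktinfo, expressions stop reaching into pkt->xt and use the accessors above instead. A sketch of the conversion inside an expression's eval hook (the expression itself is hypothetical):

static void nft_example_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	u8 buf[4];

	/* pkt->xt.thoff becomes nft_thoff(pkt); pkt->xt.state->net,
	 * ->sk and friends become nft_net(pkt), nft_sk(pkt), ...
	 */
	if (skb_copy_bits(pkt->skb, nft_thoff(pkt), buf, sizeof(buf)) < 0)
		regs->verdict.code = NFT_BREAK;
}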
__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) { struct iphdr *iph, _iph; u32 len, thoff; - iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph), - &_iph); + iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), + sizeof(*iph), &_iph); if (!iph) return -1; @@ -33,42 +31,40 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, len = ntohs(iph->tot_len); thoff = iph->ihl * 4; - if (skb->len < len) + if (pkt->skb->len < len) return -1; else if (len < thoff) return -1; pkt->tprot_set = true; pkt->tprot = iph->protocol; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; + pkt->thoff = thoff; + pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; return 0; } -static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) { - if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0) - nft_set_pktinfo_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv4_validate(pkt) < 0) + nft_set_pktinfo_unspec(pkt); } -static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt) { struct iphdr *iph; u32 len, thoff; - if (!pskb_may_pull(skb, sizeof(*iph))) + if (!pskb_may_pull(pkt->skb, sizeof(*iph))) return -1; - iph = ip_hdr(skb); + iph = ip_hdr(pkt->skb); if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; len = ntohs(iph->tot_len); thoff = iph->ihl * 4; - if (skb->len < len) { + if (pkt->skb->len < len) { __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS); return -1; } else if (len < thoff) { @@ -77,8 +73,8 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt, pkt->tprot_set = true; pkt->tprot = iph->protocol; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; + pkt->thoff = thoff; + pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; return 0; diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index 867de29f3f..7595e02b00 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -6,8 +6,7 @@ #include #include -static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt) { unsigned int flags = IP6_FH_F_AUTH; int protohdr, thoff = 0; @@ -15,18 +14,17 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); if (protohdr < 0) { - nft_set_pktinfo_unspec(pkt, skb); + nft_set_pktinfo_unspec(pkt); return; } pkt->tprot_set = true; pkt->tprot = protohdr; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = frag_off; + pkt->thoff = thoff; + pkt->fragoff = frag_off; } -static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) { #if IS_ENABLED(CONFIG_IPV6) unsigned int flags = IP6_FH_F_AUTH; @@ -36,8 +34,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, int protohdr; u32 pkt_len; - ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h), - &_ip6h); + ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), + sizeof(*ip6h), &_ip6h); if (!ip6h) return -1; @@ -45,7 +43,7 @@ 
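The CONFIG_RETPOLINE block in nf_tables_core.h above exists so the hot set-lookup path can avoid a retpolined indirect call: when retpolines are enabled, nft_set_do_lookup() becomes an out-of-line function that compares set->ops->lookup against the known backends and calls the winner directly; otherwise it degrades to the indirect call shown. A sketch of the direct-dispatch shape (not the exact kernel body):

bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	if (set->ops->lookup == nft_rhash_lookup)
		return nft_rhash_lookup(net, set, key, ext);
	if (set->ops->lookup == nft_rbtree_lookup)
		return nft_rbtree_lookup(net, set, key, ext);
	/* ... bitmap, hash and pipapo backends checked the same way ... */
	return set->ops->lookup(net, set, key, ext);
}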
static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, return -1; pkt_len = ntohs(ip6h->payload_len); - if (pkt_len + sizeof(*ip6h) > skb->len) + if (pkt_len + sizeof(*ip6h) > pkt->skb->len) return -1; protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); @@ -54,8 +52,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, pkt->tprot_set = true; pkt->tprot = protohdr; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = frag_off; + pkt->thoff = thoff; + pkt->fragoff = frag_off; return 0; #else @@ -63,15 +61,13 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, #endif } -static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) { - if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0) - nft_set_pktinfo_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv6_validate(pkt) < 0) + nft_set_pktinfo_unspec(pkt); } -static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt) { #if IS_ENABLED(CONFIG_IPV6) unsigned int flags = IP6_FH_F_AUTH; @@ -82,15 +78,15 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, int protohdr; u32 pkt_len; - if (!pskb_may_pull(skb, sizeof(*ip6h))) + if (!pskb_may_pull(pkt->skb, sizeof(*ip6h))) return -1; - ip6h = ipv6_hdr(skb); + ip6h = ipv6_hdr(pkt->skb); if (ip6h->version != 6) goto inhdr_error; pkt_len = ntohs(ip6h->payload_len); - if (pkt_len + sizeof(*ip6h) > skb->len) { + if (pkt_len + sizeof(*ip6h) > pkt->skb->len) { idev = __in6_dev_get(nft_in(pkt)); __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS); return -1; @@ -102,8 +98,8 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, pkt->tprot_set = true; pkt->tprot = protohdr; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = frag_off; + pkt->thoff = thoff; + pkt->fragoff = frag_off; return 0; diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index ad0a95c233..37e5300c7e 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -27,6 +27,11 @@ struct nf_tcp_net { u8 tcp_loose; u8 tcp_be_liberal; u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) + unsigned int offload_timeout; + unsigned int offload_pickup; +#endif }; enum udp_conntrack { @@ -37,6 +42,10 @@ enum udp_conntrack { struct nf_udp_net { unsigned int timeouts[UDP_CT_MAX]; +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) + unsigned int offload_timeout; + unsigned int offload_pickup; +#endif }; struct nf_icmp_net { diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index f6af8d96d3..b8620519ea 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -126,6 +126,7 @@ struct netns_ipv4 { u8 sysctl_tcp_syn_retries; u8 sysctl_tcp_synack_retries; u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; int sysctl_tcp_reordering; u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; @@ -210,6 +211,7 @@ struct netns_ipv4 { #endif #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH + u32 sysctl_fib_multipath_hash_fields; u8 sysctl_fib_multipath_use_neigh; u8 sysctl_fib_multipath_hash_policy; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 6153c80670..bde0b7adb4 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -28,8 +28,9 @@ struct netns_sysctl_ipv6 { int ip6_rt_gc_elasticity; int 
ip6_rt_mtu_expires; int ip6_rt_min_advmss; - u8 bindv6only; + u32 multipath_hash_fields; u8 multipath_hash_policy; + u8 bindv6only; u8 flowlabel_consistency; u8 auto_flowlabels; int icmpv6_time; diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index a0f315effa..40240722cd 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -84,6 +84,9 @@ struct netns_sctp { /* HB.interval - 30 seconds */ unsigned int hb_interval; + /* The interval for PLPMTUD probe timer */ + unsigned int probe_interval; + /* Association.Max.Retrans - 10 attempts * Path.Max.Retrans - 5 attempts (per destination address) * Max.Init.Retransmits - 8 attempts diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h new file mode 100644 index 0000000000..ea8a9cf261 --- /dev/null +++ b/include/net/netns/smc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NETNS_SMC_H__ +#define __NETNS_SMC_H__ +#include +#include + +struct smc_stats_rsn; +struct smc_stats; +struct netns_smc { + /* per cpu counters for SMC */ + struct smc_stats __percpu *smc_stats; + /* protect fback_rsn */ + struct mutex mutex_fback_rsn; + struct smc_stats_rsn *fback_rsn; +}; +#endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index e816b6a3ef..e946366e8b 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -42,6 +42,7 @@ struct netns_xfrm { struct hlist_head __rcu *state_bydst; struct hlist_head __rcu *state_bysrc; struct hlist_head __rcu *state_byspi; + struct hlist_head __rcu *state_byseq; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; diff --git a/include/net/page_pool.h b/include/net/page_pool.h index b4b6de909c..3dd62dd730 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -146,6 +146,8 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) return pool->p.dma_dir; } +bool page_pool_return_skb_page(struct page *page); + struct page_pool *page_pool_create(const struct page_pool_params *params); #ifdef CONFIG_PAGE_POOL @@ -251,4 +253,11 @@ static inline void page_pool_ring_unlock(struct page_pool *pool) spin_unlock_bh(&pool->ring.producer_lock); } +/* Store mem_info on struct page and use it while recycling skb frags */ +static inline +void page_pool_store_mem_info(struct page *page, struct page_pool *pp) +{ + page->pp = pp; +} + #endif /* _NET_PAGE_POOL_H */ diff --git a/include/net/protocol.h b/include/net/protocol.h index 2b778e1d2d..f51c06ae36 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -43,7 +43,6 @@ struct net_protocol { int (*err_handler)(struct sk_buff *skb, u32 info); unsigned int no_policy:1, - netns_ok:1, /* does the protocol do more stringent * icmp tag validation than simple * socket lookup? diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 479f60ef54..384e800665 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -37,6 +37,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * @maxtype: Highest device specific netlink attribute number * @policy: Netlink policy for device specific attribute validation * @validate: Optional validation function for netlink/changelink parameters + * @alloc: netdev allocation function, can be %NULL and is then used + * in place of alloc_netdev_mqs(), in this case @priv_size + * and @setup are unused. Returns a netdev or ERR_PTR(). 
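The netns_sysctl_ipv6 hunk above is not just an insertion: bindv6only moves below the new u32 so the u8 members stay grouped after the 4-byte field instead of leaving an alignment hole. The effect is easy to demonstrate (illustrative types only):

#include <stdio.h>
#include <stdint.h>

struct holey { uint8_t a; uint32_t b; uint8_t c, d, e; };	/* 3-byte hole after a */
struct tight { uint32_t b; uint8_t a, c, d, e; };		/* no holes */

int main(void)
{
	/* typically prints "12 8" on common ABIs */
	printf("%zu %zu\n", sizeof(struct holey), sizeof(struct tight));
	return 0;
}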
* @priv_size: sizeof net_device private space * @setup: net_device setup function * @newlink: Function for configuring and registering a new device @@ -63,6 +66,11 @@ struct rtnl_link_ops { const char *kind; size_t priv_size; + struct net_device *(*alloc)(struct nlattr *tb[], + const char *ifname, + unsigned char name_assign_type, + unsigned int num_tx_queues, + unsigned int num_rx_queues); void (*setup)(struct net_device *dev); bool netns_refund; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 1e625519ae..9ed33e6840 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -37,8 +37,15 @@ enum qdisc_state_t { __QDISC_STATE_SCHED, __QDISC_STATE_DEACTIVATED, __QDISC_STATE_MISSED, + __QDISC_STATE_DRAINING, }; +#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED) +#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING) + +#define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \ + QDISC_STATE_DRAINING) + struct qdisc_size_table { struct rcu_head rcu; struct list_head list; @@ -110,8 +117,6 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; spinlock_t seqlock; - /* for NOLOCK qdisc, true if there are no enqueued skbs */ - bool empty; struct rcu_head rcu; /* private data */ @@ -145,6 +150,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc) return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; } +static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc) +{ + return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY); +} + static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) { return q->flags & TCQ_F_CPUSTATS; @@ -153,7 +163,7 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) static inline bool qdisc_is_empty(const struct Qdisc *qdisc) { if (qdisc_is_percpu_stats(qdisc)) - return READ_ONCE(qdisc->empty); + return nolock_qdisc_is_empty(qdisc); return !READ_ONCE(qdisc->q.qlen); } @@ -161,7 +171,13 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) { if (spin_trylock(&qdisc->seqlock)) - goto nolock_empty; + return true; + + /* Paired with smp_mb__after_atomic() to make sure + * STATE_MISSED checking is synchronized with clearing + * in pfifo_fast_dequeue(). + */ + smp_mb__before_atomic(); /* If the MISSED flag is set, it means other thread has * set the MISSED flag before second spin_trylock(), so @@ -180,14 +196,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) */ set_bit(__QDISC_STATE_MISSED, &qdisc->state); + /* spin_trylock() only has load-acquire semantic, so use + * smp_mb__after_atomic() to ensure STATE_MISSED is set + * before doing the second spin_trylock(). + */ + smp_mb__after_atomic(); + /* Retry again in case other CPU may not see the new flag * after it releases the lock at the end of qdisc_run_end(). 
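A driver opting into the new rtnl_link_ops->alloc() hook above owns netdev allocation and may fail with ERR_PTR(). A hedged sketch (drv_priv, drv_setup and the "drvdemo" kind are hypothetical):

static struct net_device *drv_alloc(struct nlattr *tb[], const char *ifname,
				    unsigned char name_assign_type,
				    unsigned int num_tx_queues,
				    unsigned int num_rx_queues)
{
	struct net_device *dev;

	/* the driver can pick queue counts, priv size and setup itself */
	dev = alloc_netdev_mqs(sizeof(struct drv_priv), ifname,
			       name_assign_type, drv_setup,
			       num_tx_queues, num_rx_queues);
	return dev ? dev : ERR_PTR(-ENOMEM);
}

static struct rtnl_link_ops drv_link_ops = {
	.kind  = "drvdemo",
	.alloc = drv_alloc,	/* @priv_size and @setup are ignored now */
};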
*/ - if (!spin_trylock(&qdisc->seqlock)) - return false; - -nolock_empty: - WRITE_ONCE(qdisc->empty, false); + return spin_trylock(&qdisc->seqlock); } else if (qdisc_is_running(qdisc)) { return false; } @@ -201,15 +219,14 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) static inline void qdisc_run_end(struct Qdisc *qdisc) { - write_seqcount_end(&qdisc->running); if (qdisc->flags & TCQ_F_NOLOCK) { spin_unlock(&qdisc->seqlock); if (unlikely(test_bit(__QDISC_STATE_MISSED, - &qdisc->state))) { - clear_bit(__QDISC_STATE_MISSED, &qdisc->state); + &qdisc->state))) __netif_schedule(qdisc); - } + } else { + write_seqcount_end(&qdisc->running); } } diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 5e848884ff..2058fabffb 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -59,6 +59,7 @@ enum sctp_verb { SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */ SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timers. */ SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */ + SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer. */ SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */ SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */ SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */ diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 14a0d22c91..5859e0a16a 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -77,6 +77,7 @@ enum sctp_event_timeout { SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, SCTP_EVENT_TIMEOUT_HEARTBEAT, SCTP_EVENT_TIMEOUT_RECONF, + SCTP_EVENT_TIMEOUT_PROBE, SCTP_EVENT_TIMEOUT_SACK, SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; @@ -200,6 +201,23 @@ enum sctp_sock_state { SCTP_SS_CLOSING = TCP_CLOSE_WAIT, }; +enum sctp_plpmtud_state { + SCTP_PL_DISABLED, + SCTP_PL_BASE, + SCTP_PL_SEARCH, + SCTP_PL_COMPLETE, + SCTP_PL_ERROR, +}; + +#define SCTP_BASE_PLPMTU 1200 +#define SCTP_MAX_PLPMTU 9000 +#define SCTP_MIN_PLPMTU 512 + +#define SCTP_MAX_PROBES 3 + +#define SCTP_PL_BIG_STEP 32 +#define SCTP_PL_MIN_STEP 4 + /* These functions map various type to printable names. */ const char *sctp_cname(const union sctp_subtype id); /* chunk types */ const char *sctp_oname(const union sctp_subtype id); /* other events */ @@ -342,8 +360,7 @@ enum { #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK /* Based on IPv4 scoping , - * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, - * 192.88.99.0/24. + * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24. * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP * addresses. */ @@ -351,7 +368,6 @@ enum { ((htonl(INADDR_BROADCAST) == a) || \ ipv4_is_multicast(a) || \ ipv4_is_zeronet(a) || \ - ipv4_is_test_198(a) || \ ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. 
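With qdisc->empty gone, "empty" for a NOLOCK qdisc is now derived from the MISSED and DRAINING state bits; given the enum above, MISSED is bit 2 and DRAINING bit 3, so the non-empty mask is 0xc. A runnable restatement:

#include <stdio.h>

#define BIT(n)			(1UL << (n))
/* bit positions follow enum qdisc_state_t above */
#define QDISC_STATE_MISSED	BIT(2)
#define QDISC_STATE_DRAINING	BIT(3)
#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | QDISC_STATE_DRAINING)

static int nolock_qdisc_is_empty(unsigned long state)
{
	return !(state & QDISC_STATE_NON_EMPTY);
}

int main(void)
{
	printf("%d %d\n", nolock_qdisc_is_empty(0),
	       nolock_qdisc_is_empty(QDISC_STATE_DRAINING));	/* 1 0 */
	return 0;
}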
*/ @@ -424,4 +440,6 @@ enum { */ #define SCTP_AUTH_RANDOM_LENGTH 32 +#define SCTP_PROBE_TIMER_MIN 5000 + #endif /* __sctp_constants_h__ */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 86f74f2fe6..69bab88ad6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -145,6 +145,8 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, struct sctphdr *, struct sctp_association **, struct sctp_transport **); void sctp_err_finish(struct sock *, struct sctp_transport *); +int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb); +int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb); void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu); void sctp_icmp_redirect(struct sock *, struct sctp_transport *, @@ -573,14 +575,15 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport * /* Calculate max payload size given a MTU, or the total overhead if * given MTU is zero */ -static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, - __u32 mtu, __u32 extra) +static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp, + const struct sctp_transport *t, + __u32 mtu, __u32 extra) { __u32 overhead = sizeof(struct sctphdr) + extra; if (sp) { overhead += sp->pf->af->net_header_len; - if (sp->udp_port) + if (sp->udp_port && (!t || t->encap_port)) overhead += sizeof(struct udphdr); } else { overhead += sizeof(struct ipv6hdr); @@ -592,6 +595,12 @@ static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, return mtu ? mtu - overhead : overhead; } +static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, + __u32 mtu, __u32 extra) +{ + return __sctp_mtu_payload(sp, NULL, mtu, extra); +} + static inline __u32 sctp_dst_mtu(const struct dst_entry *dst) { return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst), @@ -615,6 +624,48 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); } +static inline int sctp_transport_pl_hlen(struct sctp_transport *t) +{ + return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0); +} + +static inline void sctp_transport_pl_reset(struct sctp_transport *t) +{ + if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) && + (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) { + if (t->pl.state == SCTP_PL_DISABLED) { + t->pl.state = SCTP_PL_BASE; + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pl.probe_size = SCTP_BASE_PLPMTU; + sctp_transport_reset_probe_timer(t); + } + } else { + if (t->pl.state != SCTP_PL_DISABLED) { + if (del_timer(&t->probe_timer)) + sctp_transport_put(t); + t->pl.state = SCTP_PL_DISABLED; + } + } +} + +static inline void sctp_transport_pl_update(struct sctp_transport *t) +{ + if (t->pl.state == SCTP_PL_DISABLED) + return; + + if (del_timer(&t->probe_timer)) + sctp_transport_put(t); + + t->pl.state = SCTP_PL_BASE; + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pl.probe_size = SCTP_BASE_PLPMTU; +} + +static inline bool sctp_transport_pl_enabled(struct sctp_transport *t) +{ + return t->pl.state != SCTP_PL_DISABLED; +} + static inline bool sctp_newsk_ready(const struct sock *sk) { return sock_flag(sk, SOCK_DEAD) || sk->sk_socket; diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index fd223c9458..2eb6d7c2c9 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -151,6 +151,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort; /* Prototypes for timeout event state functions. 
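The PLPMTUD helpers above keep probe sizes in SCTP-payload terms; what actually goes on the wire is probe_size plus the overhead __sctp_mtu_payload() accounts for, including the UDP header only when the transport uses UDP encapsulation (t->encap_port). A small arithmetic sketch for IPv4, using the usual 20/8/12-byte IPv4, UDP and SCTP common header sizes (this is my reading of the helpers, not kernel code):

#include <stdio.h>

#define SCTP_BASE_PLPMTU 1200

int main(void)
{
	unsigned int iphdr = 20, udphdr = 8, sctphdr = 12;

	printf("plain probe:     %u bytes on the wire\n",
	       SCTP_BASE_PLPMTU + iphdr + sctphdr);		/* 1232 */
	printf("udp-encap probe: %u bytes on the wire\n",
	       SCTP_BASE_PLPMTU + iphdr + udphdr + sctphdr);	/* 1240 */
	return 0;
}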
*/ sctp_state_fn_t sctp_sf_do_6_3_3_rtx; sctp_state_fn_t sctp_sf_send_reconf; +sctp_state_fn_t sctp_sf_send_probe; sctp_state_fn_t sctp_sf_do_6_2_sack; sctp_state_fn_t sctp_sf_autoclose_timer_expire; @@ -225,11 +226,13 @@ struct sctp_chunk *sctp_make_new_encap_port( const struct sctp_association *asoc, const struct sctp_chunk *chunk); struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, - const struct sctp_transport *transport); + const struct sctp_transport *transport, + __u32 probe_size); struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const void *payload, const size_t paylen); +struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len); struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, @@ -310,6 +313,7 @@ int sctp_do_sm(struct net *net, enum sctp_event_type event_type, void sctp_generate_t3_rtx_event(struct timer_list *t); void sctp_generate_heartbeat_event(struct timer_list *t); void sctp_generate_reconf_event(struct timer_list *t); +void sctp_generate_probe_event(struct timer_list *t); void sctp_generate_proto_unreach_event(struct timer_list *t); void sctp_ootb_pkt_free(struct sctp_packet *packet); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 1aa585216f..32fc4a309d 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -177,6 +177,7 @@ struct sctp_sock { * will be inherited by all new associations. */ __u32 hbinterval; + __u32 probe_interval; __be16 udp_port; __be16 encap_port; @@ -385,6 +386,7 @@ struct sctp_sender_hb_info { union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; + __u32 probe_size; }; int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, @@ -461,7 +463,7 @@ struct sctp_af { int saddr); void (*from_sk) (union sctp_addr *, struct sock *sk); - void (*from_addr_param) (union sctp_addr *, + bool (*from_addr_param) (union sctp_addr *, union sctp_addr_param *, __be16 port, int iif); int (*to_addr_param) (const union sctp_addr *, @@ -656,6 +658,7 @@ struct sctp_chunk { data_accepted:1, /* At least 1 chunk accepted */ auth:1, /* IN: was auth'ed | OUT: needs auth */ has_asconf:1, /* IN: have seen an asconf before */ + pmtu_probe:1, /* Used by PLPMTUD, can be set in a HB chunk */ tsn_missing_report:2, /* Data chunk missing counter. */ fast_retransmit:2; /* Is this chunk fast retransmitted? */ }; @@ -858,6 +861,7 @@ struct sctp_transport { * the destination address every heartbeat interval. */ unsigned long hbinterval; + unsigned long probe_interval; /* SACK delay timeout */ unsigned long sackdelay; @@ -934,6 +938,9 @@ struct sctp_transport { /* Timer to handler reconf chunk rtx */ struct timer_list reconf_timer; + /* Timer to send a probe HB packet for PLPMTUD */ + struct timer_list probe_timer; + /* Since we're using per-destination retransmission timers * (see above), we're also using per-destination "transmitted" * queues. This probably ought to be a private struct @@ -976,6 +983,15 @@ struct sctp_transport { char cacc_saw_newack; } cacc; + struct { + __u16 pmtu; + __u16 probe_size; + __u16 probe_high; + __u8 probe_count:3; + __u8 raise_count:5; + __u8 state; + } pl; /* plpmtud related */ + /* 64-bit random number sent with heartbeat.
*/ __u64 hb_nonce; @@ -993,6 +1009,7 @@ void sctp_transport_free(struct sctp_transport *); void sctp_transport_reset_t3_rtx(struct sctp_transport *); void sctp_transport_reset_hb_timer(struct sctp_transport *); void sctp_transport_reset_reconf_timer(struct sctp_transport *transport); +void sctp_transport_reset_probe_timer(struct sctp_transport *transport); int sctp_transport_hold(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); @@ -1007,6 +1024,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); +void sctp_transport_pl_send(struct sctp_transport *t); +void sctp_transport_pl_recv(struct sctp_transport *t); /* This is the structure we use to queue packets as they come into @@ -1795,6 +1814,7 @@ struct sctp_association { * will be inherited by all new transports. */ unsigned long hbinterval; + unsigned long probe_interval; __be16 encap_port; diff --git a/include/net/sock.h b/include/net/sock.h index 7a7058f4f2..f23cb259b0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -316,7 +316,9 @@ struct bpf_local_storage; * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only - * @sk_tsflags: SO_TIMESTAMPING socket options + * @sk_tsflags: SO_TIMESTAMPING flags + * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock + * for timestamping * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals @@ -493,6 +495,7 @@ struct sock { seqlock_t sk_stamp_seq; #endif u16 sk_tsflags; + int sk_bind_phc; u8 sk_shutdown; u32 sk_tskey; atomic_t sk_zckey; @@ -2281,6 +2284,8 @@ static inline int sock_error(struct sock *sk) return -err; } +void sk_error_report(struct sock *sk); + static inline unsigned long sock_wspace(struct sock *sk) { int amt = 0; @@ -2752,6 +2757,10 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) void sock_def_readable(struct sock *sk); int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); +void sock_set_timestamp(struct sock *sk, int optname, bool valbool); +int sock_set_timestamping(struct sock *sk, int optname, + struct so_timestamping timestamping); + void sock_enable_timestamps(struct sock *sk); void sock_no_linger(struct sock *sk); void sock_set_keepalive(struct sock *sk); diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 505f1e18e9..473b0b0fa4 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -13,8 +13,9 @@ extern spinlock_t reuseport_lock; struct sock_reuseport { struct rcu_head rcu; - u16 max_socks; /* length of socks */ - u16 num_socks; /* elements in socks */ + u16 max_socks; /* length of socks */ + u16 num_socks; /* elements in socks */ + u16 num_closed_socks; /* closed elements in socks */ /* The last synq overflow event timestamp of this * reuse->socks[] group. 
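The new sk_bind_phc field and the sock_set_timestamping() signature above correspond to a user-visible extension: SO_TIMESTAMPING now accepts a struct so_timestamping that carries the PHC index of a PTP virtual clock. A user-space sketch, assuming headers from a kernel with this series and the SOF_TIMESTAMPING_BIND_PHC flag it defines elsewhere:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int bind_sock_to_phc(int fd, int phc_index)
{
	struct so_timestamping ts = {
		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
			 SOF_TIMESTAMPING_RX_HARDWARE |
			 SOF_TIMESTAMPING_RAW_HARDWARE |
			 SOF_TIMESTAMPING_BIND_PHC,
		.bind_phc = phc_index,	/* index of the PTP virtual clock */
	};

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
}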
*/ @@ -31,10 +32,14 @@ extern int reuseport_alloc(struct sock *sk, bool bind_inany); extern int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany); extern void reuseport_detach_sock(struct sock *sk); +void reuseport_stop_listen_sock(struct sock *sk); extern struct sock *reuseport_select_sock(struct sock *sk, u32 hash, struct sk_buff *skb, int hdr_len); +struct sock *reuseport_migrate_sock(struct sock *sk, + struct sock *migrating_sk, + struct sk_buff *skb); extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); extern int reuseport_detach_prog(struct sock *sk); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index f1a5a9a363..e4cac9218c 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -202,6 +202,7 @@ enum switchdev_notifier_type { struct switchdev_notifier_info { struct net_device *dev; struct netlink_ext_ack *extack; + const void *ctx; }; struct switchdev_notifier_fdb_info { @@ -268,19 +269,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev, int switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*add_cb)(struct net_device *dev, + int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)); int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*del_cb)(struct net_device *dev, + int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)); int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), - int (*set_cb)(struct net_device *dev, + int (*set_cb)(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack)); #else @@ -352,7 +353,7 @@ static inline int switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*add_cb)(struct net_device *dev, + int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)) { @@ -363,7 +364,7 @@ static inline int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*del_cb)(struct net_device *dev, + int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)) { return 0; @@ -373,7 +374,7 @@ static inline int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), - int (*set_cb)(struct net_device *dev, + int (*set_cb)(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack)) { diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index f051046ba0..f94b8bc26f 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -16,6 +16,7 @@ struct tcf_vlan_params { u16 tcfv_push_vid; __be16 tcfv_push_proto; u8 tcfv_push_prio; + bool tcfv_push_prio_exists; struct rcu_head rcu; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index d05193cb0d..17df9b047e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -412,6 
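The const void *ctx threaded through the switchdev helpers above lets a driver that registered one notifier for several logical ports recover its own per-port object in the callback. A sketch (the drv_* types and helper are hypothetical):

static int drv_port_obj_add(struct net_device *dev, const void *ctx,
			    const struct switchdev_obj *obj,
			    struct netlink_ext_ack *extack)
{
	const struct drv_port *port = ctx;	/* whatever the driver passed in */

	if (obj->id != SWITCHDEV_OBJ_ID_PORT_VLAN)
		return -EOPNOTSUPP;

	return drv_vlan_add(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
}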
+412,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); int tcp_set_rcvlowat(struct sock *sk, int val); int tcp_set_window_clamp(struct sock *sk, int val); +void tcp_update_recv_tstamps(struct sk_buff *skb, + struct scm_timestamping_internal *tss); +void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, + struct scm_timestamping_internal *tss); void tcp_data_ready(struct sock *sk); #ifdef CONFIG_MMU int tcp_mmap(struct file *file, struct socket *sock, @@ -682,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp) static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) { + /* mptcp hooks are only on the slow path */ + if (sk_is_mptcp((struct sock *)tp)) + return; + tp->pred_flags = htonl((tp->tcp_header_len << 26) | ntohl(TCP_FLAG_ACK) | snd_wnd); diff --git a/include/net/tls.h b/include/net/tls.h index 8341a8d1e8..be4b3e1cac 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -79,8 +79,6 @@ __SNMP_INC_STATS((net)->mib.tls_statistics, field) #define TLS_INC_STATS(net, field) \ SNMP_INC_STATS((net)->mib.tls_statistics, field) -#define __TLS_DEC_STATS(net, field) \ - __SNMP_DEC_STATS((net)->mib.tls_statistics, field) #define TLS_DEC_STATS(net, field) \ SNMP_DEC_STATS((net)->mib.tls_statistics, field) @@ -471,7 +469,7 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) static inline void tls_err_abort(struct sock *sk, int err) { sk->sk_err = err; - sk->sk_error_report(sk); + sk_error_report(sk); } static inline bool tls_bigint_increment(unsigned char *seq, int len) diff --git a/include/net/xdp.h b/include/net/xdp.h index a5bc214a49..5533f0ab2a 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -170,6 +170,7 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct net_device *dev); int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp); +struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf); static inline void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 9c0722c6d7..fff069d2ed 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -37,7 +37,7 @@ struct xdp_umem { struct xsk_map { struct bpf_map map; spinlock_t lock; /* Synchronize map updates */ - struct xdp_sock *xsk_map[]; + struct xdp_sock __rcu *xsk_map[]; }; struct xdp_sock { diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c58a6d4eb6..cbff7c2a97 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -145,6 +145,12 @@ enum { XFRM_MODE_FLAG_TUNNEL = 1, }; +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY, + XFRM_REPLAY_MODE_BMP, + XFRM_REPLAY_MODE_ESN, +}; + /* Full description of state of transformer. */ struct xfrm_state { possible_net_t xs_net; @@ -154,6 +160,7 @@ struct xfrm_state { }; struct hlist_node bysrc; struct hlist_node byspi; + struct hlist_node byseq; refcount_t refcnt; spinlock_t lock; @@ -214,9 +221,8 @@ struct xfrm_state { struct xfrm_replay_state preplay; struct xfrm_replay_state_esn *preplay_esn; - /* The functions for replay detection. 
*/ - const struct xfrm_replay *repl; - + /* replay detection mode */ + enum xfrm_replay_mode repl_mode; /* internal flag that only holds state for delayed aevent at the * moment */ @@ -296,18 +302,6 @@ struct km_event { struct net *net; }; -struct xfrm_replay { - void (*advance)(struct xfrm_state *x, __be32 net_seq); - int (*check)(struct xfrm_state *x, - struct sk_buff *skb, - __be32 net_seq); - int (*recheck)(struct xfrm_state *x, - struct sk_buff *skb, - __be32 net_seq); - void (*notify)(struct xfrm_state *x, int event); - int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); -}; - struct xfrm_if_cb { struct xfrm_if *(*decode_session)(struct sk_buff *skb, unsigned short family); @@ -387,7 +381,6 @@ void xfrm_flush_gc(void); void xfrm_state_delete_tunnel(struct xfrm_state *x); struct xfrm_type { - char *description; struct module *owner; u8 proto; u8 flags; @@ -402,14 +395,12 @@ struct xfrm_type { int (*output)(struct xfrm_state *, struct sk_buff *pskb); int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); - int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); }; int xfrm_register_type(const struct xfrm_type *type, unsigned short family); void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); struct xfrm_type_offload { - char *description; struct module *owner; u8 proto; void (*encap)(struct xfrm_state *, struct sk_buff *pskb); @@ -1024,6 +1015,7 @@ struct xfrm_offload { #define CRYPTO_INVALID_PROTOCOL 128 __u8 proto; + __u8 inner_ipproto; }; struct sec_path { @@ -1546,6 +1538,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x); +u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); @@ -1570,7 +1563,6 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); int xfrm4_rcv(struct sk_buff *skb); -int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) { @@ -1581,7 +1573,6 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) } int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); -int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); @@ -1605,9 +1596,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); -int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); -int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, - u8 **prevhdr); #ifdef CONFIG_XFRM void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); @@ -1721,6 +1709,12 @@ static inline int xfrm_policy_id2dir(u32 index) } #ifdef CONFIG_XFRM +void 
xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq); +int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); +void xfrm_replay_notify(struct xfrm_state *x, int event); +int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb); +int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); + static inline int xfrm_aevent_is_on(struct net *net) { struct sock *nlsk; diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index eaa8386dbc..7a9a23e7a6 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, { bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE; - if (pool->dma_pages_cnt && cross_pg) { + if (likely(!cross_pg)) + return false; + + if (pool->dma_pages_cnt) { return !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK); } - return false; + + /* skb path */ + return addr + len > pool->addrs_cnt; } static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr) diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 57c1ac881d..7e54220586 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -206,11 +206,6 @@ static inline u8 ib_get_lver(struct ib_header *hdr) IB_LVER_MASK); } -static inline u16 ib_get_len(struct ib_header *hdr) -{ - return (u16)(be16_to_cpu(hdr->lrh[2])); -} - static inline u32 ib_get_qkey(struct ib_other_headers *ohdr) { return be32_to_cpu(ohdr->u.ud.deth[0]); diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index f1d34f06a6..465b0d0bda 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -717,28 +717,27 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, */ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc); -/** - * ib_cancel_mad - Cancels an outstanding send MAD operation. - * @mad_agent: Specifies the registration associated with sent MAD. - * @send_buf: Indicates the MAD to cancel. - * - * MADs will be returned to the user through the corresponding - * ib_mad_send_handler. - */ -void ib_cancel_mad(struct ib_mad_agent *mad_agent, - struct ib_mad_send_buf *send_buf); - /** * ib_modify_mad - Modifies an outstanding send MAD operation. - * @mad_agent: Specifies the registration associated with sent MAD. * @send_buf: Indicates the MAD to modify. * @timeout_ms: New timeout value for sent MAD. * * This call will reset the timeout value for a sent MAD to the specified * value. */ -int ib_modify_mad(struct ib_mad_agent *mad_agent, - struct ib_mad_send_buf *send_buf, u32 timeout_ms); +int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms); + +/** + * ib_cancel_mad - Cancels an outstanding send MAD operation. + * @send_buf: Indicates the MAD to cancel. + * + * MADs will be returned to the user through the corresponding + * ib_mad_send_handler. + */ +static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf) +{ + ib_modify_mad(send_buf, 0); +} /** * ib_create_send_mad - Allocate and initialize a data buffer and work request diff --git a/include/rdma/ib_sysfs.h b/include/rdma/ib_sysfs.h new file mode 100644 index 0000000000..3b77cfd74d --- /dev/null +++ b/include/rdma/ib_sysfs.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2021 Mellanox Technologies Ltd. All rights reserved. 
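Replacing the struct xfrm_replay ops with enum xfrm_replay_mode turns five indirect calls into ordinary functions that branch on x->repl_mode. The shape is roughly this (a sketch; the *_legacy/_bmp/_esn helpers stand in for the per-mode implementations in xfrm_replay.c):

void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	switch (x->repl_mode) {
	case XFRM_REPLAY_MODE_LEGACY:
		xfrm_replay_advance_legacy(x, net_seq);
		break;
	case XFRM_REPLAY_MODE_BMP:
		xfrm_replay_advance_bmp(x, net_seq);
		break;
	case XFRM_REPLAY_MODE_ESN:
		xfrm_replay_advance_esn(x, net_seq);
		break;
	}
}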
+ */ +#ifndef DEF_RDMA_IB_SYSFS_H +#define DEF_RDMA_IB_SYSFS_H + +#include + +struct ib_device; + +struct ib_port_attribute { + struct attribute attr; + ssize_t (*show)(struct ib_device *ibdev, u32 port_num, + struct ib_port_attribute *attr, char *buf); + ssize_t (*store)(struct ib_device *ibdev, u32 port_num, + struct ib_port_attribute *attr, const char *buf, + size_t count); +}; + +#define IB_PORT_ATTR_RW(_name) \ + struct ib_port_attribute ib_port_attr_##_name = __ATTR_RW(_name) + +#define IB_PORT_ATTR_ADMIN_RW(_name) \ + struct ib_port_attribute ib_port_attr_##_name = \ + __ATTR_RW_MODE(_name, 0600) + +#define IB_PORT_ATTR_RO(_name) \ + struct ib_port_attribute ib_port_attr_##_name = __ATTR_RO(_name) + +#define IB_PORT_ATTR_WO(_name) \ + struct ib_port_attribute ib_port_attr_##_name = __ATTR_WO(_name) + +struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj, + u32 *port_num); + +#endif diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 7e2f3699b8..371df1c80a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -50,6 +50,8 @@ struct ib_uqp_object; struct ib_usrq_object; struct ib_uwq_object; struct rdma_cm_id; +struct ib_port; +struct hw_stats_device_data; extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -2139,7 +2141,6 @@ struct ib_flow_action { }; struct ib_mad; -struct ib_grh; enum ib_process_mad_flags { IB_MAD_IGNORE_MKEY = 1, @@ -2175,15 +2176,17 @@ struct ib_port_data { struct ib_port_immutable immutable; spinlock_t pkey_list_lock; + + spinlock_t netdev_lock; + struct list_head pkey_list; struct ib_port_cache cache; - spinlock_t netdev_lock; struct net_device __rcu *netdev; struct hlist_node ndev_hash_link; struct rdma_port_counter port_counter; - struct rdma_hw_stats *hw_stats; + struct ib_port *sysfs; }; /* rdma netdev type - specifies protocol type */ @@ -2299,6 +2302,14 @@ struct ib_device_ops { u32 uverbs_abi_ver; unsigned int uverbs_no_driver_id_binding:1; + /* + * NOTE: New drivers should not make use of device_group; instead new + * device parameter should be exposed via netlink command. This + * mechanism exists only for existing drivers. + */ + const struct attribute_group *device_group; + const struct attribute_group **port_groups; + int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, const struct ib_send_wr **bad_send_wr); int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, @@ -2459,6 +2470,14 @@ struct ib_device_ops { enum ib_uverbs_advise_mr_advice advice, u32 flags, struct ib_sge *sg_list, u32 num_sge, struct uverbs_attr_bundle *attrs); + + /* + * Kernel users should universally support relaxed ordering (RO), as + * they are designed to read data only after observing the CQE and use + * the DMA API correctly. + * + * Some drivers implicitly enable RO if platform supports it. + */ int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, @@ -2523,13 +2542,14 @@ struct ib_device_ops { unsigned int *meta_sg_offset); /** - * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the - * driver initialized data. The struct is kfree()'ed by the sysfs - * core when the device is removed. A lifespan of -1 in the return - * struct tells the core to set a default lifespan. + * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and + * fill in the driver initialized data. The struct is kfree()'ed by + * the sysfs core when the device is removed. 
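The new ib_port_attribute above gives drivers typed per-port sysfs hooks instead of raw kobject handling. A minimal sketch of a read-only attribute a driver might declare with these macros, wired up through the new ops->port_groups array (the value shown is hypothetical):

static ssize_t hw_rev_show(struct ib_device *ibdev, u32 port_num,
			   struct ib_port_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "port %u: rev %u\n", port_num, 1);
}
static IB_PORT_ATTR_RO(hw_rev);

static struct attribute *drv_port_attrs[] = {
	&ib_port_attr_hw_rev.attr,
	NULL,
};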
A lifespan of -1 in the + * return struct tells the core to set a default lifespan. */ - struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, - u32 port_num); + struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device); + struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device, + u32 port_num); /** * get_hw_stats - Fill in the counter value(s) in the stats struct. * @index - The index in the value array we wish to have updated, or @@ -2544,12 +2564,7 @@ struct ib_device_ops { */ int (*get_hw_stats)(struct ib_device *device, struct rdma_hw_stats *stats, u32 port, int index); - /* - * This function is called once for each port when a ib device is - * registered. - */ - int (*init_port)(struct ib_device *device, u32 port_num, - struct kobject *port_sysfs); + /** * Allows rdma drivers to add their own restrack attributes. */ @@ -2675,11 +2690,12 @@ struct ib_device { struct ib_core_device coredev; }; - /* First group for device attributes, - * Second group for driver provided attributes (optional). - * It is NULL terminated array. + /* First group is for device attributes, + * Second group is for driver provided attributes (optional). + * Third group is for the hw_stats + * It is a NULL terminated array. */ - const struct attribute_group *groups[3]; + const struct attribute_group *groups[4]; u64 uverbs_cmd_mask; @@ -2694,8 +2710,7 @@ struct ib_device { u8 node_type; u32 phys_port_cnt; struct ib_device_attr attrs; - struct attribute_group *hw_stats_ag; - struct rdma_hw_stats *hw_stats; + struct hw_stats_device_data *hw_stats_data; #ifdef CONFIG_CGROUP_RDMA struct rdmacg_device cg_device; @@ -4286,8 +4301,6 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u32 port); struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr); int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); -int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, - u32 wq_attr_mask); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size); @@ -4570,28 +4583,6 @@ int rdma_init_netdev(struct ib_device *device, u32 port_num, void (*setup)(struct net_device *), struct net_device *netdev); -/** - * rdma_set_device_sysfs_group - Set device attributes group to have - * driver specific sysfs entries at - * for infiniband class. - * - * @device: device pointer for which attributes to be created - * @group: Pointer to group which should be added when device - * is registered with sysfs. - * rdma_set_device_sysfs_group() allows existing drivers to expose one - * group per device to have sysfs attributes. - * - * NOTE: New drivers should not make use of this API; instead new device - * parameter should be exposed via netlink command. This API and mechanism - * exist only for existing drivers. - */ -static inline void -rdma_set_device_sysfs_group(struct ib_device *dev, - const struct attribute_group *group) -{ - dev->groups[1] = group; -} - /** * rdma_device_to_ibdev - Get ib_device pointer from device pointer * @@ -4627,7 +4618,7 @@ static inline int ibdev_to_node(struct ib_device *ibdev) * * NOTE: New drivers should not make use of this API; This API is only for * existing drivers who have exposed sysfs entries using - * rdma_set_device_sysfs_group(). + * ops->device_group. 
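Putting the new sysfs plumbing together, a hypothetical driver could expose a read-only port attribute via the ib_sysfs.h helpers introduced above and publish it through ops->port_groups (ops->device_group works the same way at device scope). Everything named demo_* is illustrative only:

#include <rdma/ib_sysfs.h>

static ssize_t demo_state_show(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "port %u ok\n", port_num);
}
static IB_PORT_ATTR_RO(demo_state);

static struct attribute *demo_port_attrs[] = {
	&ib_port_attr_demo_state.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_port);

/* in the driver's ib_device_ops: .port_groups = demo_port_groups, */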
*/ #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h index 9e273fed0a..0019169523 100644 --- a/include/scsi/fc/fc_ms.h +++ b/include/scsi/fc/fc_ms.h @@ -24,6 +24,12 @@ */ #define FC_FDMI_SUBTYPE 0x10 /* fs_ct_hdr.ct_fs_subtype */ +/* + * Management server FDMI specifications. + */ +#define FDMI_V1 1 /* FDMI version 1 specifications */ +#define FDMI_V2 2 /* FDMI version 2 specifications */ + /* * Management server FDMI Requests. */ @@ -57,22 +63,36 @@ enum fc_fdmi_hba_attr_type { FC_FDMI_HBA_ATTR_FIRMWAREVERSION = 0x0009, FC_FDMI_HBA_ATTR_OSNAMEVERSION = 0x000A, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD = 0x000B, + FC_FDMI_HBA_ATTR_NODESYMBLNAME = 0x000C, + FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO = 0x000D, + FC_FDMI_HBA_ATTR_NUMBEROFPORTS = 0x000E, + FC_FDMI_HBA_ATTR_FABRICNAME = 0x000F, + FC_FDMI_HBA_ATTR_BIOSVERSION = 0x0010, + FC_FDMI_HBA_ATTR_BIOSSTATE = 0x0011, + FC_FDMI_HBA_ATTR_VENDORIDENTIFIER = 0x00E0, }; /* * HBA Attribute Length */ #define FC_FDMI_HBA_ATTR_NODENAME_LEN 8 -#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80 -#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80 -#define FC_FDMI_HBA_ATTR_MODEL_LEN 256 -#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256 -#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 256 +#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64 +#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64 +#define FC_FDMI_HBA_ATTR_MODEL_LEN 64 +#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 64 +#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 128 #define FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN 4 +#define FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN 64 +#define FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN 4 +#define FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN 4 +#define FC_FDMI_HBA_ATTR_FABRICNAME_LEN 8 +#define FC_FDMI_HBA_ATTR_BIOSVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_BIOSSTATE_LEN 4 +#define FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN 8 /* * Port Attribute Type @@ -84,6 +104,16 @@ enum fc_fdmi_port_attr_type { FC_FDMI_PORT_ATTR_MAXFRAMESIZE = 0x0004, FC_FDMI_PORT_ATTR_OSDEVICENAME = 0x0005, FC_FDMI_PORT_ATTR_HOSTNAME = 0x0006, + FC_FDMI_PORT_ATTR_NODENAME = 0x0007, + FC_FDMI_PORT_ATTR_PORTNAME = 0x0008, + FC_FDMI_PORT_ATTR_SYMBOLICNAME = 0x0009, + FC_FDMI_PORT_ATTR_PORTTYPE = 0x000A, + FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC = 0x000B, + FC_FDMI_PORT_ATTR_FABRICNAME = 0x000C, + FC_FDMI_PORT_ATTR_CURRENTFC4TYPE = 0x000D, + FC_FDMI_PORT_ATTR_PORTSTATE = 0x101, + FC_FDMI_PORT_ATTR_DISCOVEREDPORTS = 0x102, + FC_FDMI_PORT_ATTR_PORTID = 0x103, }; /* @@ -95,6 +125,17 @@ enum fc_fdmi_port_attr_type { #define FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN 4 #define FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN 256 #define FC_FDMI_PORT_ATTR_HOSTNAME_LEN 256 +#define FC_FDMI_PORT_ATTR_NODENAME_LEN 8 +#define FC_FDMI_PORT_ATTR_PORTNAME_LEN 8 +#define FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN 256 +#define FC_FDMI_PORT_ATTR_PORTTYPE_LEN 4 +#define FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN 4 +#define FC_FDMI_PORT_ATTR_FABRICNAME_LEN 8 +#define FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN 32 +#define FC_FDMI_PORT_ATTR_PORTSTATE_LEN 4 +#define FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN 4 
+#define FC_FDMI_PORT_ATTR_PORTID_LEN 4 + /* * HBA Attribute ID diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index b71b5c4f41..7b192d88f1 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h @@ -52,7 +52,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2) } /* - * useful common(control and data pathes) macro + * useful common(control and data paths) macro */ #define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) #define hton24(p, v) { \ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 9b87e1a1c6..eeb8d689ff 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -399,7 +399,7 @@ struct fc_seq { * @sid: Source FCID * @did: Destination FCID * @esb_stat: ESB exchange status - * @r_a_tov: Resouce allocation time out value (in msecs) + * @r_a_tov: Resource allocation time out value (in msecs) * @seq_id: The next sequence ID to use * @encaps: encapsulation information for lower-level driver * @f_ctl: F_CTL flags for the sequence @@ -668,7 +668,7 @@ enum fc_lport_event { * @wwnn: World Wide Node Name * @service_params: Common service parameters * @e_d_tov: Error detection timeout value - * @r_a_tov: Resouce allocation timeout value + * @r_a_tov: Resource allocation timeout value * @rnid_gen: RNID information * @sg_supp: Indicates if scatter gather is supported * @seq_offload: Indicates if sequence offload is supported @@ -841,7 +841,7 @@ static inline void fc_lport_free_stats(struct fc_lport *lport) /** * lport_priv() - Return the private data from a local port - * @lport: The local port whose private data is to be retreived + * @lport: The local port whose private data is to be retrieved */ static inline void *lport_priv(const struct fc_lport *lport) { diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 02f966e935..4ee233e5a6 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -145,6 +145,13 @@ static inline void* iscsi_next_hdr(struct iscsi_task *task) return (void*)task->hdr + task->hdr_len; } +static inline bool iscsi_task_is_completed(struct iscsi_task *task) +{ + return task->state == ISCSI_TASK_COMPLETED || + task->state == ISCSI_TASK_ABRT_TMF || + task->state == ISCSI_TASK_ABRT_SESS_RECOV; +} + /* Connection's states */ enum { ISCSI_CONN_INITIAL_STAGE, @@ -195,12 +202,6 @@ struct iscsi_conn { unsigned long suspend_tx; /* suspend Tx */ unsigned long suspend_rx; /* suspend Rx */ - /* abort */ - wait_queue_head_t ehwait; /* used in eh_abort() */ - struct iscsi_tm tmhdr; - struct timer_list tmf_timer; - int tmf_state; /* see TMF_INITIAL, etc.*/ - /* negotiated params */ unsigned max_recv_dlength; /* initiator_max_recv_dsl*/ unsigned max_xmit_dlength; /* target_max_recv_dsl */ @@ -270,6 +271,12 @@ struct iscsi_session { * and recv lock. 
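As an aside on iscsi_proto.h above, the ntoh24()/hton24() helpers whose comment is corrected there can be sanity-checked with a hypothetical round-trip:

static inline void demo_24bit_roundtrip(void)
{
	u8 buf[3];
	const u32 v = 0x123456;

	hton24(buf, v);			/* buf = { 0x12, 0x34, 0x56 } */
	WARN_ON(ntoh24(buf) != v);	/* most significant byte first */
}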
*/ struct mutex eh_mutex; + /* abort */ + wait_queue_head_t ehwait; /* used in eh_abort() */ + struct iscsi_tm tmhdr; + struct timer_list tmf_timer; + int tmf_state; /* see TMF_INITIAL, etc.*/ + struct iscsi_task *running_aborted_task; /* iSCSI session-wide sequencing */ uint32_t cmdsn; @@ -424,6 +431,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *); extern void iscsi_conn_stop(struct iscsi_cls_conn *, int); extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, int); +extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active); extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); extern void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 9271d7a49b..6fe125a71b 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -474,10 +474,16 @@ enum service_response { }; enum exec_status { - /* The SAM_STAT_.. codes fit in the lower 6 bits, alias some of - * them here to silence 'case value not in enumerated type' warnings + /* + * Values 0..0x7f are used to return the SAM_STAT_* codes. To avoid + * 'case value not in enumerated type' compiler warnings every value + * returned through the exec_status enum needs an alias with the SAS_ + * prefix here. */ - __SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION, + SAS_SAM_STAT_GOOD = SAM_STAT_GOOD, + SAS_SAM_STAT_BUSY = SAM_STAT_BUSY, + SAS_SAM_STAT_TASK_ABORTED = SAM_STAT_TASK_ABORTED, + SAS_SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION, SAS_DEV_NO_RESPONSE = 0x80, SAS_DATA_UNDERRUN, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 246ced4016..3e46859774 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -11,6 +11,7 @@ #include #include #include +#include struct scsi_cmnd; @@ -30,32 +31,6 @@ enum scsi_timeouts { */ #define SCAN_WILD_CARD ~0 -/** scsi_status_is_good - check the status return. - * - * @status: the status passed up from the driver (including host and - * driver components) - * - * This returns true for known good conditions that may be treated as - * command completed normally - */ -static inline int scsi_status_is_good(int status) -{ - /* - * FIXME: bit0 is listed as reserved in SCSI-2, but is - * significant in SCSI-3. For now, we follow the SCSI-2 - * behaviour and ignore reserved bits. - */ - status &= 0xfe; - return ((status == SAM_STAT_GOOD) || - (status == SAM_STAT_CONDITION_MET) || - /* Next two "intermediate" statuses are obsolete in SAM-4 */ - (status == SAM_STAT_INTERMEDIATE) || - (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) || - /* FIXME: this is obsolete in SAM-3 */ - (status == SAM_STAT_COMMAND_TERMINATED)); -} - - /* * standard mode-select header prepended to all mode-select commands */ @@ -88,94 +63,31 @@ static inline int scsi_is_wlun(u64 lun) return (lun & 0xff00) == SCSI_W_LUN_BASE; } +/** + * scsi_status_is_check_condition - check the status return. + * + * @status: the status passed up from the driver (including host and + * driver components) + * + * This returns true if the status code is SAM_STAT_CHECK_CONDITION. + */ +static inline int scsi_status_is_check_condition(int status) +{ + if (status < 0) + return false; + status &= 0xfe; + return status == SAM_STAT_CHECK_CONDITION; +} /* - * MESSAGE CODES + * Extended message codes. 
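To see what the SAS_SAM_STAT_* aliases buy, consider a hypothetical helper that funnels SAM status values back through enum exec_status; with the aliases, every value it returns is a member of its declared return type, so the compiler has nothing to warn about:

static enum exec_status demo_to_exec_status(u8 sam_stat)
{
	switch (sam_stat) {
	case SAM_STAT_GOOD:
		return SAS_SAM_STAT_GOOD;
	case SAM_STAT_BUSY:
		return SAS_SAM_STAT_BUSY;
	case SAM_STAT_CHECK_CONDITION:
		return SAS_SAM_STAT_CHECK_CONDITION;
	default:
		return SAS_DEV_NO_RESPONSE;
	}
}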
*/ - -#define COMMAND_COMPLETE 0x00 -#define EXTENDED_MESSAGE 0x01 #define EXTENDED_MODIFY_DATA_POINTER 0x00 #define EXTENDED_SDTR 0x01 #define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */ #define EXTENDED_WDTR 0x03 #define EXTENDED_PPR 0x04 #define EXTENDED_MODIFY_BIDI_DATA_PTR 0x05 -#define SAVE_POINTERS 0x02 -#define RESTORE_POINTERS 0x03 -#define DISCONNECT 0x04 -#define INITIATOR_ERROR 0x05 -#define ABORT_TASK_SET 0x06 -#define MESSAGE_REJECT 0x07 -#define NOP 0x08 -#define MSG_PARITY_ERROR 0x09 -#define LINKED_CMD_COMPLETE 0x0a -#define LINKED_FLG_CMD_COMPLETE 0x0b -#define TARGET_RESET 0x0c -#define ABORT_TASK 0x0d -#define CLEAR_TASK_SET 0x0e -#define INITIATE_RECOVERY 0x0f /* SCSI-II only */ -#define RELEASE_RECOVERY 0x10 /* SCSI-II only */ -#define TERMINATE_IO_PROC 0x11 /* SCSI-II only */ -#define CLEAR_ACA 0x16 -#define LOGICAL_UNIT_RESET 0x17 -#define SIMPLE_QUEUE_TAG 0x20 -#define HEAD_OF_QUEUE_TAG 0x21 -#define ORDERED_QUEUE_TAG 0x22 -#define IGNORE_WIDE_RESIDUE 0x23 -#define ACA 0x24 -#define QAS_REQUEST 0x55 - -/* Old SCSI2 names, don't use in new code */ -#define BUS_DEVICE_RESET TARGET_RESET -#define ABORT ABORT_TASK_SET - -/* - * Host byte codes - */ - -#define DID_OK 0x00 /* NO error */ -#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */ -#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */ -#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */ -#define DID_BAD_TARGET 0x04 /* BAD target. */ -#define DID_ABORT 0x05 /* Told to abort for some other reason */ -#define DID_PARITY 0x06 /* Parity error */ -#define DID_ERROR 0x07 /* Internal error */ -#define DID_RESET 0x08 /* Reset by somebody. */ -#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */ -#define DID_PASSTHROUGH 0x0a /* Force command past mid-layer */ -#define DID_SOFT_ERROR 0x0b /* The low level driver just wish a retry */ -#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */ -#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also - * without decrementing the retry count */ -#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution - * and the driver blocked the port to - * recover the link. Transport class will - * retry or fail IO */ -#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */ -#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on - * other paths */ -#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other - * paths might yield different results */ -#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */ -#define DID_MEDIUM_ERROR 0x13 /* Medium error */ -#define DID_TRANSPORT_MARGINAL 0x14 /* Transport marginal errors */ -#define DRIVER_OK 0x00 /* Driver status */ - -/* - * These indicate the error that occurred, and what is available. - */ - -#define DRIVER_BUSY 0x01 -#define DRIVER_SOFT 0x02 -#define DRIVER_MEDIA 0x03 -#define DRIVER_ERROR 0x04 - -#define DRIVER_INVALID 0x05 -#define DRIVER_TIMEOUT 0x06 -#define DRIVER_HARD 0x07 -#define DRIVER_SENSE 0x08 /* * Internal return values. @@ -206,14 +118,10 @@ enum scsi_disposition { * These are set by: * * status byte = set from target device - * msg_byte = return status from host adapter itself. + * msg_byte (unused) * host_byte = set by low-level driver to indicate status. - * driver_byte = set by mid-level. 
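With the message byte documented as unused and the driver byte gone, only two fields of scsi_cmnd::result remain meaningful, so a hypothetical decode reduces to:

static void demo_decode_result(int result)
{
	u8 status = result & 0xff;	/* SAM status from the target */
	u8 host = host_byte(result);	/* enum scsi_host_status from the LLD */

	pr_info("status 0x%02x host 0x%02x\n", status, host);
}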
 */
-#define status_byte(result) (((result) >> 1) & 0x7f)
-#define msg_byte(result) (((result) >> 8) & 0xff)
 #define host_byte(result) (((result) >> 16) & 0xff)
-#define driver_byte(result) (((result) >> 24) & 0xff)
 #define sense_class(sense) (((sense) >> 4) & 0x7)
 #define sense_error(sense) ((sense) & 0xf)
@@ -277,4 +185,35 @@ enum scsi_disposition {
 /* Used to obtain the PCI location of a device */
 #define SCSI_IOCTL_GET_PCI 0x5387
+/** scsi_status_is_good - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true for known good conditions that may be treated as
+ * command completed normally
+ */
+static inline bool scsi_status_is_good(int status)
+{
+	if (status < 0)
+		return false;
+
+	if (host_byte(status) == DID_NO_CONNECT)
+		return false;
+
+	/*
+	 * FIXME: bit0 is listed as reserved in SCSI-2, but is
+	 * significant in SCSI-3. For now, we follow the SCSI-2
+	 * behaviour and ignore reserved bits.
+	 */
+	status &= 0xfe;
+	return ((status == SAM_STAT_GOOD) ||
+		(status == SAM_STAT_CONDITION_MET) ||
+		/* Next two "intermediate" statuses are obsolete in SAM-4 */
+		(status == SAM_STAT_INTERMEDIATE) ||
+		(status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
+		/* FIXME: this is obsolete in SAM-3 */
+		(status == SAM_STAT_COMMAND_TERMINATED));
+}
+
 #endif /* _SCSI_SCSI_H */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index fed024f4c0..779a59fe86 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -315,9 +315,9 @@ static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
 	cmd->result = (cmd->result & 0xffffff00) | status;
 }
-static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
+static inline u8 get_status_byte(struct scsi_cmnd *cmd)
 {
-	cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+	return cmd->result & 0xff;
 }
 static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
@@ -325,9 +325,36 @@ static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
 	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
 }
-static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+static inline u8 get_host_byte(struct scsi_cmnd *cmd)
 {
-	cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
+	return (cmd->result >> 16) & 0xff;
+}
+
+/**
+ * scsi_msg_to_host_byte() - translate message byte
+ *
+ * Translate the SCSI parallel message byte to a matching
+ * host byte setting. A message of COMMAND_COMPLETE indicates
+ * a successful command execution, any other message indicates
+ * an error. As the messages themselves only have a meaning
+ * for the SCSI parallel protocol this function translates
+ * them into a matching host byte value for SCSI EH.
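A hypothetical completion path in an old parallel-SCSI HBA shows how the relocated scsi_status_is_good() and the new accessors compose; demo_spi_complete() is illustrative, not part of the patch:

static void demo_spi_complete(struct scsi_cmnd *cmd, u8 sam_stat, u8 msg)
{
	set_status_byte(cmd, sam_stat);
	scsi_msg_to_host_byte(cmd, msg);   /* e.g. ABORT_TASK_SET -> DID_ABORT */

	if (!scsi_status_is_good(cmd->result))
		pr_debug("status 0x%02x host 0x%02x\n",
			 get_status_byte(cmd), get_host_byte(cmd));
}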
+ */ +static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg) +{ + switch (msg) { + case COMMAND_COMPLETE: + break; + case ABORT_TASK_SET: + set_host_byte(cmd, DID_ABORT); + break; + case TARGET_RESET: + set_host_byte(cmd, DID_RESET); + break; + default: + set_host_byte(cmd, DID_ERROR); + break; + } } static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) @@ -341,4 +368,7 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) return xfer_len; } +extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc, + u8 key, u8 asc, u8 ascq); + #endif /* _SCSI_SCSI_CMND_H */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d0bf88d77f..75363707b7 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -764,7 +764,7 @@ extern void scsi_host_put(struct Scsi_Host *t); extern struct Scsi_Host *scsi_host_lookup(unsigned short); extern const char *scsi_host_state_name(enum scsi_host_state); extern void scsi_host_complete_all_commands(struct Scsi_Host *shost, - int status); + enum scsi_host_status status); static inline int __must_check scsi_add_host(struct Scsi_Host *host, struct device *dev) diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index c368601119..f017843a81 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -190,39 +190,21 @@ struct scsi_varlen_cdb_hdr { * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft * T10/1561-D Revision 4 Draft dated 7th November 2002. */ -#define SAM_STAT_GOOD 0x00 -#define SAM_STAT_CHECK_CONDITION 0x02 -#define SAM_STAT_CONDITION_MET 0x04 -#define SAM_STAT_BUSY 0x08 -#define SAM_STAT_INTERMEDIATE 0x10 -#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14 -#define SAM_STAT_RESERVATION_CONFLICT 0x18 -#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */ -#define SAM_STAT_TASK_SET_FULL 0x28 -#define SAM_STAT_ACA_ACTIVE 0x30 -#define SAM_STAT_TASK_ABORTED 0x40 - -/* - * Status codes. These are deprecated as they are shifted 1 bit right - * from those found in the SCSI standards. This causes confusion for - * applications that are ported to several OSes. Prefer SAM Status codes - * above. 
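scsi_build_sense(), declared in scsi_cmnd.h above, fills the sense buffer and sets a CHECK CONDITION result in one call. A hypothetical rejection of an unsupported CDB field might read:

static void demo_reject_cdb(struct scsi_cmnd *cmd)
{
	/* ILLEGAL REQUEST, INVALID FIELD IN CDB (asc 0x24, ascq 0x00) */
	scsi_build_sense(cmd, 0 /* fixed format */, ILLEGAL_REQUEST, 0x24, 0x00);
}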
- */ - -#define GOOD 0x00 -#define CHECK_CONDITION 0x01 -#define CONDITION_GOOD 0x02 -#define BUSY 0x04 -#define INTERMEDIATE_GOOD 0x08 -#define INTERMEDIATE_C_GOOD 0x0a -#define RESERVATION_CONFLICT 0x0c -#define COMMAND_TERMINATED 0x11 -#define QUEUE_FULL 0x14 -#define ACA_ACTIVE 0x18 -#define TASK_ABORTED 0x20 - -#define STATUS_MASK 0xfe +enum sam_status { + SAM_STAT_GOOD = 0x00, + SAM_STAT_CHECK_CONDITION = 0x02, + SAM_STAT_CONDITION_MET = 0x04, + SAM_STAT_BUSY = 0x08, + SAM_STAT_INTERMEDIATE = 0x10, + SAM_STAT_INTERMEDIATE_CONDITION_MET = 0x14, + SAM_STAT_RESERVATION_CONFLICT = 0x18, + SAM_STAT_COMMAND_TERMINATED = 0x22, /* obsolete in SAM-3 */ + SAM_STAT_TASK_SET_FULL = 0x28, + SAM_STAT_ACA_ACTIVE = 0x30, + SAM_STAT_TASK_ABORTED = 0x40, +}; +#define STATUS_MASK 0xfe /* * SENSE KEYS */ @@ -341,4 +323,16 @@ enum zbc_zone_cond { ZBC_ZONE_COND_OFFLINE = 0xf, }; +/* Version descriptor values for INQUIRY */ +enum scsi_version_descriptor { + SCSI_VERSION_DESCRIPTOR_FCP4 = 0x0a40, + SCSI_VERSION_DESCRIPTOR_ISCSI = 0x0960, + SCSI_VERSION_DESCRIPTOR_SAM5 = 0x00a0, + SCSI_VERSION_DESCRIPTOR_SAS3 = 0x0c60, + SCSI_VERSION_DESCRIPTOR_SBC3 = 0x04c0, + SCSI_VERSION_DESCRIPTOR_SBP3 = 0x0980, + SCSI_VERSION_DESCRIPTOR_SPC4 = 0x0460, + SCSI_VERSION_DESCRIPTOR_SRP = 0x0940 +}; + #endif /* _SCSI_PROTO_H_ */ diff --git a/include/scsi/scsi_status.h b/include/scsi/scsi_status.h new file mode 100644 index 0000000000..31d30cee18 --- /dev/null +++ b/include/scsi/scsi_status.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _SCSI_SCSI_STATUS_H +#define _SCSI_SCSI_STATUS_H + +#include +#include + +/* Message codes. */ +enum scsi_msg_byte { + COMMAND_COMPLETE = 0x00, + EXTENDED_MESSAGE = 0x01, + SAVE_POINTERS = 0x02, + RESTORE_POINTERS = 0x03, + DISCONNECT = 0x04, + INITIATOR_ERROR = 0x05, + ABORT_TASK_SET = 0x06, + MESSAGE_REJECT = 0x07, + NOP = 0x08, + MSG_PARITY_ERROR = 0x09, + LINKED_CMD_COMPLETE = 0x0a, + LINKED_FLG_CMD_COMPLETE = 0x0b, + TARGET_RESET = 0x0c, + ABORT_TASK = 0x0d, + CLEAR_TASK_SET = 0x0e, + INITIATE_RECOVERY = 0x0f, /* SCSI-II only */ + RELEASE_RECOVERY = 0x10, /* SCSI-II only */ + TERMINATE_IO_PROC = 0x11, /* SCSI-II only */ + CLEAR_ACA = 0x16, + LOGICAL_UNIT_RESET = 0x17, + SIMPLE_QUEUE_TAG = 0x20, + HEAD_OF_QUEUE_TAG = 0x21, + ORDERED_QUEUE_TAG = 0x22, + IGNORE_WIDE_RESIDUE = 0x23, + ACA = 0x24, + QAS_REQUEST = 0x55, + + /* Old SCSI2 names, don't use in new code */ + BUS_DEVICE_RESET = TARGET_RESET, + ABORT = ABORT_TASK_SET, +}; + +/* Host byte codes. */ +enum scsi_host_status { + DID_OK = 0x00, /* NO error */ + DID_NO_CONNECT = 0x01, /* Couldn't connect before timeout period */ + DID_BUS_BUSY = 0x02, /* BUS stayed busy through time out period */ + DID_TIME_OUT = 0x03, /* TIMED OUT for other reason */ + DID_BAD_TARGET = 0x04, /* BAD target. */ + DID_ABORT = 0x05, /* Told to abort for some other reason */ + DID_PARITY = 0x06, /* Parity error */ + DID_ERROR = 0x07, /* Internal error */ + DID_RESET = 0x08, /* Reset by somebody. */ + DID_BAD_INTR = 0x09, /* Got an interrupt we weren't expecting. */ + DID_PASSTHROUGH = 0x0a, /* Force command past mid-layer */ + DID_SOFT_ERROR = 0x0b, /* The low level driver just wish a retry */ + DID_IMM_RETRY = 0x0c, /* Retry without decrementing retry count */ + DID_REQUEUE = 0x0d, /* Requeue command (no immediate retry) also + * without decrementing the retry count */ + DID_TRANSPORT_DISRUPTED = 0x0e, /* Transport error disrupted execution + * and the driver blocked the port to + * recover the link. 
Transport class will + * retry or fail IO */ + DID_TRANSPORT_FAILFAST = 0x0f, /* Transport class fastfailed the io */ + DID_TARGET_FAILURE = 0x10, /* Permanent target failure, do not retry on + * other paths */ + DID_NEXUS_FAILURE = 0x11, /* Permanent nexus failure, retry on other + * paths might yield different results */ + DID_ALLOC_FAILURE = 0x12, /* Space allocation on the device failed */ + DID_MEDIUM_ERROR = 0x13, /* Medium error */ + DID_TRANSPORT_MARGINAL = 0x14, /* Transport marginal errors */ +}; + +#endif /* _SCSI_SCSI_STATUS_H */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 14214ee121..e80a7c542c 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -517,10 +517,11 @@ enum fc_host_event_code { * managed by the transport w/o driver interaction. */ +#define FC_VENDOR_IDENTIFIER 8 #define FC_FC4_LIST_SIZE 32 #define FC_SYMBOLIC_NAME_SIZE 256 #define FC_VERSION_STRING_SIZE 64 -#define FC_SERIAL_NUMBER_SIZE 80 +#define FC_SERIAL_NUMBER_SIZE 64 struct fc_host_attrs { /* Fixed Attributes */ @@ -532,6 +533,10 @@ struct fc_host_attrs { u32 supported_speeds; u32 maxframe_size; u16 max_npiv_vports; + u32 max_ct_payload; + u32 num_ports; + u32 num_discovered_ports; + u32 bootbios_state; char serial_number[FC_SERIAL_NUMBER_SIZE]; char manufacturer[FC_SERIAL_NUMBER_SIZE]; char model[FC_SYMBOLIC_NAME_SIZE]; @@ -540,6 +545,9 @@ struct fc_host_attrs { char driver_version[FC_VERSION_STRING_SIZE]; char firmware_version[FC_VERSION_STRING_SIZE]; char optionrom_version[FC_VERSION_STRING_SIZE]; + char vendor_identifier[FC_VENDOR_IDENTIFIER]; + char bootbios_version[FC_SYMBOLIC_NAME_SIZE]; + /* Dynamic Attributes */ u32 port_id; @@ -573,6 +581,9 @@ struct fc_host_attrs { /* bsg support */ struct request_queue *rqst_q; + + /* FDMI support version*/ + u8 fdmi_version; }; #define shost_to_fc_host(x) \ @@ -652,6 +663,18 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) #define fc_host_dev_loss_tmo(x) \ (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo) +#define fc_host_max_ct_payload(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->max_ct_payload) +#define fc_host_vendor_identifier(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->vendor_identifier) +#define fc_host_num_discovered_ports(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->num_discovered_ports) +#define fc_host_num_ports(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->num_ports) +#define fc_host_bootbios_version(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->bootbios_version) +#define fc_host_bootbios_state(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->bootbios_state) /* The functions by which the transport class and the driver communicate */ struct fc_function_template { diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index fc5a39839b..c5d7810fd7 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -82,6 +82,7 @@ struct iscsi_transport { void (*destroy_session) (struct iscsi_cls_session *session); struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, uint32_t cid); + void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active); int (*bind_conn) (struct iscsi_cls_session *session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading); @@ -196,15 +197,23 @@ enum iscsi_connection_state { ISCSI_CONN_BOUND, }; +#define ISCSI_CLS_CONN_BIT_CLEANUP 1 + struct iscsi_cls_conn { struct list_head 
conn_list; /* item in connlist */ - struct list_head conn_list_err; /* item in connlist_err */ void *dd_data; /* LLD private data */ struct iscsi_transport *transport; uint32_t cid; /* connection id */ + /* + * This protects the conn startup and binding/unbinding of the ep to + * the conn. Unbinding includes ep_disconnect and stop_conn. + */ struct mutex ep_mutex; struct iscsi_endpoint *ep; + unsigned long flags; + struct work_struct cleanup_work; + struct device dev; /* sysfs transport/container device */ enum iscsi_connection_state state; }; @@ -434,6 +443,8 @@ extern void iscsi_remove_session(struct iscsi_cls_session *session); extern void iscsi_free_session(struct iscsi_cls_session *session); extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, int dd_size, uint32_t cid); +extern void iscsi_put_conn(struct iscsi_cls_conn *conn); +extern void iscsi_get_conn(struct iscsi_cls_conn *conn); extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); extern void iscsi_unblock_session(struct iscsi_cls_session *session); extern void iscsi_block_session(struct iscsi_cls_session *session); @@ -441,6 +452,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); +extern void iscsi_put_endpoint(struct iscsi_endpoint *ep); extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd); extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *t, diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 7327e12f33..843cefb8ef 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -131,6 +131,39 @@ struct compat_sg_io_hdr { #define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */ #define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */ +/* + * Obsolete DRIVER_SENSE driver byte + * + * Originally the SCSI midlayer would set the DRIVER_SENSE driver byte when + * a sense code was generated and a sense buffer was allocated. + * However, as nowadays every scsi command has a sense code allocated this + * distinction became moot as one could check the sense buffer directly. + * Consequently this byte is not set anymore from the midlayer, but SG will + * keep setting this byte to be compatible with previous releases. + */ +#define DRIVER_SENSE 0x08 +/* Obsolete driver_byte() declaration */ +#define driver_byte(result) (((result) >> 24) & 0xff) + +/* + * Original linux SCSI Status codes. They are shifted 1 bit right + * from those found in the SCSI standards. 
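That one-bit shift can be pinned down with a hypothetical compile-time check against the defines that follow (BUILD_BUG_ON() comes from <linux/build_bug.h>):

static inline void demo_legacy_status_check(void)
{
	/* e.g. SAM_STAT_CHECK_CONDITION (0x02) >> 1 == CHECK_CONDITION (0x01) */
	BUILD_BUG_ON(CHECK_CONDITION != (SAM_STAT_CHECK_CONDITION >> 1));
	BUILD_BUG_ON(TASK_ABORTED != (SAM_STAT_TASK_ABORTED >> 1));
}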
+ */ + +#define GOOD 0x00 +#define CHECK_CONDITION 0x01 +#define CONDITION_GOOD 0x02 +#define BUSY 0x04 +#define INTERMEDIATE_GOOD 0x08 +#define INTERMEDIATE_C_GOOD 0x0a +#define RESERVATION_CONFLICT 0x0c +#define COMMAND_TERMINATED 0x11 +#define QUEUE_FULL 0x14 +#define ACA_ACTIVE 0x18 +#define TASK_ABORTED 0x20 + +/* Obsolete status_byte() declaration */ +#define status_byte(result) (((result) >> 1) & 0x7f) typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */ int host_no; /* as in "scsi" where 'n' is one of 0, 1, 2 etc */ @@ -145,7 +178,7 @@ typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */ - char orphan; /* 0 -> normal request, 1 -> from interruped SG_IO */ + char orphan; /* 0 -> normal request, 1 -> from interrupted SG_IO */ char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */ char problem; /* 0 -> no problem detected, 1 -> error to report */ int pack_id; /* pack_id associated with request */ diff --git a/include/scsi/srp.h b/include/scsi/srp.h index 177d8026e9..dfe0984b58 100644 --- a/include/scsi/srp.h +++ b/include/scsi/srp.h @@ -107,10 +107,10 @@ struct srp_direct_buf { * having the 20-byte structure padded to 24 bytes on 64-bit architectures. */ struct srp_indirect_buf { - struct srp_direct_buf table_desc; + struct srp_direct_buf table_desc __packed __aligned(4); __be32 len; - struct srp_direct_buf desc_list[]; -} __attribute__((packed)); + struct srp_direct_buf desc_list[] __packed __aligned(4); +}; /* Immediate data buffer descriptor as defined in SRP2. */ struct srp_imm_buf { @@ -175,13 +175,13 @@ struct srp_login_rsp { u8 opcode; u8 reserved1[3]; __be32 req_lim_delta; - u64 tag; + u64 tag __packed __aligned(4); __be32 max_it_iu_len; __be32 max_ti_iu_len; __be16 buf_fmt; u8 rsp_flags; u8 reserved2[25]; -} __attribute__((packed)); +}; struct srp_login_rej { u8 opcode; @@ -207,10 +207,6 @@ struct srp_t_logout { u64 tag; }; -/* - * We need the packed attribute because the SRP spec only aligns the - * 8-byte LUN field to 4 bytes. - */ struct srp_tsk_mgmt { u8 opcode; u8 sol_not; @@ -225,10 +221,6 @@ struct srp_tsk_mgmt { u8 reserved5[8]; }; -/* - * We need the packed attribute because the SRP spec only aligns the - * 8-byte LUN field to 4 bytes. 
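The srp.h rework above trades the struct-wide __attribute__((packed)) for per-member annotations, so only the fields the SRP spec under-aligns give up natural alignment while every other member keeps ordinary access. The pattern, in reduced hypothetical form:

struct demo_srp_rsp {
	u8	opcode;
	u8	reserved[3];
	__be32	req_lim_delta;
	u64	tag __packed __aligned(4);  /* 8-byte field, 4-byte aligned per spec */
	u8	status;
};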
- */ struct srp_cmd { u8 opcode; u8 sol_not; @@ -266,7 +258,7 @@ struct srp_rsp { u8 sol_not; u8 reserved1[2]; __be32 req_lim_delta; - u64 tag; + u64 tag __packed __aligned(4); u8 reserved2[2]; u8 flags; u8 status; @@ -275,7 +267,7 @@ struct srp_rsp { __be32 sense_data_len; __be32 resp_data_len; u8 data[]; -} __attribute__((packed)); +}; struct srp_cred_req { u8 opcode; @@ -301,13 +293,13 @@ struct srp_aer_req { u8 sol_not; u8 reserved[2]; __be32 req_lim_delta; - u64 tag; + u64 tag __packed __aligned(4); u32 reserved2; struct scsi_lun lun; __be32 sense_data_len; u32 reserved3; u8 sense_data[]; -} __attribute__((packed)); +}; struct srp_aer_rsp { u8 opcode; diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index ea8ac1dfb8..dd3bbc75e5 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -9,8 +9,6 @@ #include #include -#define RPI_FIRMWARE_CHAN_FB 1 - struct rpi_firmware; enum rpi_firmware_property_status { @@ -198,6 +196,5 @@ static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev, return NULL; } #endif -int rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data); #endif /* __SOC_RASPBERRY_FIRMWARE_H__ */ diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h index 42d6aeb951..0bf610acaf 100644 --- a/include/soc/imx/cpu.h +++ b/include/soc/imx/cpu.h @@ -9,6 +9,7 @@ #define MXC_CPU_MX27 27 #define MXC_CPU_MX31 31 #define MXC_CPU_MX35 35 +#define MXC_CPU_MX50 50 #define MXC_CPU_MX51 51 #define MXC_CPU_MX53 53 #define MXC_CPU_IMX6SL 0x60 diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h new file mode 100644 index 0000000000..2b64c95f3b --- /dev/null +++ b/include/soc/microchip/mpfs.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * Microchip PolarFire SoC (MPFS) + * + * Copyright (c) 2020 Microchip Corporation. All rights reserved. 
+ * + * Author: Conor Dooley + * + */ + +#ifndef __SOC_MPFS_H__ +#define __SOC_MPFS_H__ + +#include +#include + +struct mpfs_sys_controller; + +struct mpfs_mss_msg { + u8 cmd_opcode; + u16 cmd_data_size; + struct mpfs_mss_response *response; + u8 *cmd_data; + u16 mbox_offset; + u16 resp_offset; +}; + +struct mpfs_mss_response { + u32 resp_status; + u32 *resp_msg; + u16 resp_size; +}; + +#if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) + +int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, void *msg); + +struct mpfs_sys_controller *mpfs_sys_controller_get(struct device_node *mailbox_node); + +#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */ + +#endif /* __SOC_MPFS_H__ */ diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h index 98027a76ce..af41ad80ec 100644 --- a/include/soc/tegra/common.h +++ b/include/soc/tegra/common.h @@ -6,6 +6,37 @@ #ifndef __SOC_TEGRA_COMMON_H__ #define __SOC_TEGRA_COMMON_H__ +#include +#include + +struct device; + +/** + * Tegra SoC core device OPP table configuration + * + * @init_state: pre-initialize OPP state of a device + */ +struct tegra_core_opp_params { + bool init_state; +}; + +#ifdef CONFIG_ARCH_TEGRA bool soc_is_tegra(void); +int devm_tegra_core_dev_init_opp_table(struct device *dev, + struct tegra_core_opp_params *params); +#else +static inline bool soc_is_tegra(void) +{ + return false; +} + +static inline int +devm_tegra_core_dev_init_opp_table(struct device *dev, + struct tegra_core_opp_params *params) +{ + return -ENODEV; +} +#endif + #endif /* __SOC_TEGRA_COMMON_H__ */ diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index 78cbc787a4..990701f788 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h @@ -52,14 +52,28 @@ struct tegra_sku_info { enum tegra_revision revision; }; +#ifdef CONFIG_ARCH_TEGRA +extern struct tegra_sku_info tegra_sku_info; u32 tegra_read_straps(void); u32 tegra_read_ram_code(void); int tegra_fuse_readl(unsigned long offset, u32 *value); - -#ifdef CONFIG_ARCH_TEGRA -extern struct tegra_sku_info tegra_sku_info; #else static struct tegra_sku_info tegra_sku_info __maybe_unused; + +static inline u32 tegra_read_straps(void) +{ + return 0; +} + +static inline u32 tegra_read_ram_code(void) +{ + return 0; +} + +static inline int tegra_fuse_readl(unsigned long offset, u32 *value) +{ + return -ENODEV; +} #endif struct device *tegra_soc_device_register(void); diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index d2fbe6a8b2..1066b1194a 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -17,34 +18,48 @@ struct clk; struct device; struct page; -struct tegra_smmu_enable { - unsigned int reg; - unsigned int bit; -}; - struct tegra_mc_timing { unsigned long rate; u32 *emem_data; }; -/* latency allowance */ -struct tegra_mc_la { - unsigned int reg; - unsigned int shift; - unsigned int mask; - unsigned int def; -}; - struct tegra_mc_client { unsigned int id; const char *name; - unsigned int swgroup; + /* + * For Tegra210 and earlier, this is the SWGROUP ID used for IOVA translations in the + * Tegra SMMU, whereas on Tegra186 and later this is the ID used to override the ARM SMMU + * stream ID used for IOVA translations for the given memory client. 
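Returning to the OPP helper added in soc/tegra/common.h above, a hypothetical Tegra client (assuming the usual platform driver boilerplate) would call it from probe; note that the non-Tegra stub returns -ENODEV, so callers able to run without an OPP table should treat that error as benign:

static int demo_tegra_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {
		.init_state = true,	/* pre-initialize the OPP state */
	};
	int err;

	err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
	if (err && err != -ENODEV)
		return err;	/* a missing OPP table is tolerated here */

	return 0;
}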
+ */ + union { + unsigned int swgroup; + unsigned int sid; + }; unsigned int fifo_size; - struct tegra_smmu_enable smmu; - struct tegra_mc_la la; + struct { + /* Tegra SMMU enable (Tegra210 and earlier) */ + struct { + unsigned int reg; + unsigned int bit; + } smmu; + + /* latency allowance */ + struct { + unsigned int reg; + unsigned int shift; + unsigned int mask; + unsigned int def; + } la; + + /* stream ID overrides (Tegra186 and later) */ + struct { + unsigned int override; + unsigned int security; + } sid; + } regs; }; struct tegra_smmu_swgroup { @@ -155,6 +170,19 @@ struct tegra_mc_icc_ops { void *data); }; +struct tegra_mc_ops { + /* + * @probe: Callback to set up SoC-specific bits of the memory controller. This is called + * after basic, common set up that is done by the SoC-agnostic bits. + */ + int (*probe)(struct tegra_mc *mc); + void (*remove)(struct tegra_mc *mc); + int (*suspend)(struct tegra_mc *mc); + int (*resume)(struct tegra_mc *mc); + irqreturn_t (*handle_irq)(int irq, void *data); + int (*probe_device)(struct tegra_mc *mc, struct device *dev); +}; + struct tegra_mc_soc { const struct tegra_mc_client *clients; unsigned int num_clients; @@ -176,8 +204,7 @@ struct tegra_mc_soc { unsigned int num_resets; const struct tegra_mc_icc_ops *icc_ops; - - int (*init)(struct tegra_mc *mc); + const struct tegra_mc_ops *ops; }; struct tegra_mc { @@ -210,12 +237,19 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc); #ifdef CONFIG_TEGRA_MC struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev); +int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev); #else static inline struct tegra_mc * devm_tegra_memory_controller_get(struct device *dev) { return ERR_PTR(-ENODEV); } + +static inline int +tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev) +{ + return -ENODEV; +} #endif #endif /* __SOC_TEGRA_MC_H__ */ diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index 361cb64246..d186bccd12 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -171,6 +171,8 @@ int tegra_io_rail_power_off(unsigned int id); void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode); +bool tegra_pmc_core_domain_state_synced(void); + #else static inline int tegra_powergate_power_on(unsigned int id) { @@ -227,6 +229,11 @@ static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode) { } +static inline bool tegra_pmc_core_domain_state_synced(void) +{ + return false; +} + #endif /* CONFIG_SOC_TEGRA_PMC */ #if defined(CONFIG_SOC_TEGRA_PMC) && defined(CONFIG_PM_SLEEP) diff --git a/include/sound/core.h b/include/sound/core.h index 1f9aef0adb..c4ade12172 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -128,7 +128,9 @@ struct snd_card { #ifdef CONFIG_PM unsigned int power_state; /* power state */ + atomic_t power_ref; wait_queue_head_t power_sleep; + wait_queue_head_t power_ref_sleep; #endif #if IS_ENABLED(CONFIG_SND_MIXER_OSS) @@ -142,21 +144,61 @@ struct snd_card { #ifdef CONFIG_PM static inline unsigned int snd_power_get_state(struct snd_card *card) { - return card->power_state; + return READ_ONCE(card->power_state); } static inline void snd_power_change_state(struct snd_card *card, unsigned int state) { - card->power_state = state; + WRITE_ONCE(card->power_state, state); wake_up(&card->power_sleep); } +/** + * snd_power_ref - Take the reference count for power control + * @card: sound card object + * + * The power_ref reference of the card 
is used for managing to block + * the snd_power_sync_ref() operation. This function increments the reference. + * The counterpart snd_power_unref() has to be called appropriately later. + */ +static inline void snd_power_ref(struct snd_card *card) +{ + atomic_inc(&card->power_ref); +} + +/** + * snd_power_unref - Release the reference count for power control + * @card: sound card object + */ +static inline void snd_power_unref(struct snd_card *card) +{ + if (atomic_dec_and_test(&card->power_ref)) + wake_up(&card->power_ref_sleep); +} + +/** + * snd_power_sync_ref - wait until the card power_ref is freed + * @card: sound card object + * + * This function is used to synchronize with the pending power_ref being + * released. + */ +static inline void snd_power_sync_ref(struct snd_card *card) +{ + wait_event(card->power_ref_sleep, !atomic_read(&card->power_ref)); +} + /* init.c */ -int snd_power_wait(struct snd_card *card, unsigned int power_state); +int snd_power_wait(struct snd_card *card); +int snd_power_ref_and_wait(struct snd_card *card); #else /* ! CONFIG_PM */ -static inline int snd_power_wait(struct snd_card *card, unsigned int state) { return 0; } +static inline int snd_power_wait(struct snd_card *card) { return 0; } +static inline void snd_power_ref(struct snd_card *card) {} +static inline void snd_power_unref(struct snd_card *card) {} +static inline int snd_power_ref_and_wait(struct snd_card *card) { return 0; } +static inline void snd_power_sync_ref(struct snd_card *card) {} #define snd_power_get_state(card) ({ (void)(card); SNDRV_CTL_POWER_D0; }) #define snd_power_change_state(card, state) do { (void)(card); } while (0) diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index 5daa937684..44d87775b3 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -12,6 +12,7 @@ #include struct device; +struct vm_area_struct; /* * buffer device info @@ -64,84 +65,19 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size) return (size + PAGE_SIZE - 1) >> PAGE_SHIFT; } -#ifdef CONFIG_SND_DMA_SGBUF -/* - * Scatter-Gather generic device pages - */ -void *snd_malloc_sgbuf_pages(struct device *device, - size_t size, struct snd_dma_buffer *dmab, - size_t *res_size); -int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab); - -struct snd_sg_page { - void *buf; - dma_addr_t addr; -}; - -struct snd_sg_buf { - int size; /* allocated byte size */ - int pages; /* allocated pages */ - int tblsize; /* allocated table size */ - struct snd_sg_page *table; /* address table */ - struct page **page_table; /* page table (for vmap/vunmap) */ - struct device *dev; -}; - -/* - * return the physical address at the corresponding offset - */ -static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, - size_t offset) -{ - struct snd_sg_buf *sgbuf = dmab->private_data; - dma_addr_t addr; - - if (!sgbuf) - return dmab->addr + offset; - addr = sgbuf->table[offset >> PAGE_SHIFT].addr; - addr &= ~((dma_addr_t)PAGE_SIZE - 1); - return addr + offset % PAGE_SIZE; -} - -/* - * return the virtual address at the corresponding offset - */ -static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab, - size_t offset) -{ - struct snd_sg_buf *sgbuf = dmab->private_data; - - if (!sgbuf) - return dmab->area + offset; - return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; -} - -unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, - unsigned int ofs, unsigned int size); -#else -/* non-SG versions */ -static inline dma_addr_t snd_sgbuf_get_addr(struct 
snd_dma_buffer *dmab, - size_t offset) -{ - return dmab->addr + offset; -} - -static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab, - size_t offset) -{ - return dmab->area + offset; -} - -#define snd_sgbuf_get_chunk_size(dmab, ofs, size) (size) - -#endif /* CONFIG_SND_DMA_SGBUF */ - /* allocate/release a buffer */ int snd_dma_alloc_pages(int type, struct device *dev, size_t size, struct snd_dma_buffer *dmab); int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size, struct snd_dma_buffer *dmab); void snd_dma_free_pages(struct snd_dma_buffer *dmab); +int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab, + struct vm_area_struct *area); + +dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset); +struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset); +unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, + unsigned int ofs, unsigned int size); #endif /* __SOUND_MEMALLOC_H */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2e1200d17d..938f36050a 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1066,6 +1066,7 @@ void snd_pcm_set_ops(struct snd_pcm * pcm, int direction, void snd_pcm_set_sync(struct snd_pcm_substream *substream); int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); +void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream); void snd_pcm_period_elapsed(struct snd_pcm_substream *substream); snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, void *buf, bool interleaved, @@ -1253,14 +1254,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p) -#ifdef CONFIG_SND_DMA_SGBUF -/* - * SG-buffer handling - */ -#define snd_pcm_substream_sgbuf(substream) \ - snd_pcm_get_dma_buf(substream)->private_data -#endif /* SND_DMA_SGBUF */ - /** * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset * @substream: PCM substream @@ -1272,17 +1265,6 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs); } -/** - * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset - * @substream: PCM substream - * @ofs: byte offset - */ -static inline void * -snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs) -{ - return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs); -} - /** * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the * contig. 
page from the given size
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 334842daa9..989e151733 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -81,6 +81,8 @@ struct snd_rawmidi_substream {
 	bool opened;		/* open flag */
 	bool append;		/* append flag (merge more streams) */
 	bool active_sensing;	/* send active sensing when close */
+	unsigned int framing;	/* whether to frame input data */
+	unsigned int clock_type; /* clock source to use for input framing */
 	int use_count;		/* use counter (for output) */
 	size_t bytes;
 	struct snd_rawmidi *rmidi;
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 1358a0ceb4..0dcb361a98 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -36,6 +36,22 @@ struct snd_compr_stream;
 #define SND_SOC_DAIFMT_MSB SND_SOC_DAIFMT_LEFT_J
 #define SND_SOC_DAIFMT_LSB SND_SOC_DAIFMT_RIGHT_J
+/* Describes the possible PCM format */
+/*
+ * use SND_SOC_DAI_FORMAT_xx as each shift.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT 0
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_I2S (1 << SND_SOC_DAI_FORMAT_I2S)
+#define SND_SOC_POSSIBLE_DAIFMT_RIGHT_J (1 << SND_SOC_DAI_FORMAT_RIGHT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_LEFT_J (1 << SND_SOC_DAI_FORMAT_LEFT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_A (1 << SND_SOC_DAI_FORMAT_DSP_A)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_B (1 << SND_SOC_DAI_FORMAT_DSP_B)
+#define SND_SOC_POSSIBLE_DAIFMT_AC97 (1 << SND_SOC_DAI_FORMAT_AC97)
+#define SND_SOC_POSSIBLE_DAIFMT_PDM (1 << SND_SOC_DAI_FORMAT_PDM)
+
 /*
  * DAI Clock gating.
  *
@@ -45,6 +61,17 @@ struct snd_compr_stream;
 #define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */
 #define SND_SOC_DAIFMT_GATED (0 << 4) /* clock is gated */
+/* Describes the possible PCM format */
+/*
+ * define GATED -> CONT. GATED will be selected if both are selected.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT 16
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_GATED (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CONT (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+
 /*
  * DAI hardware signal polarity.
* @@ -71,6 +98,14 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_IB_NF (3 << 8) /* invert BCLK + nor FRM */ #define SND_SOC_DAIFMT_IB_IF (4 << 8) /* invert BCLK + FRM */ +/* Describes the possible PCM format */ +#define SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT 32 +#define SND_SOC_POSSIBLE_DAIFMT_INV_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_NB_NF (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_NB_IF (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_IB_NF (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_IB_IF (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) + /* * DAI hardware clock providers/consumers * @@ -81,7 +116,7 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_CBP_CFP (1 << 12) /* codec clk provider & frame provider */ #define SND_SOC_DAIFMT_CBC_CFP (2 << 12) /* codec clk consumer & frame provider */ #define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */ -#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame follower */ +#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */ /* previous definitions kept for backwards-compatibility, do not use in new contributions */ #define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP @@ -89,6 +124,14 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC #define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC +/* Describes the possible PCM format */ +#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48 +#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFP (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFP (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFC (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFC (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) + #define SND_SOC_DAIFMT_FORMAT_MASK 0x000f #define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0 #define SND_SOC_DAIFMT_INV_MASK 0x0f00 @@ -131,6 +174,8 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio); /* Digital Audio interface formatting */ +int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd); +u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority); int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt); int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, @@ -292,6 +337,16 @@ struct snd_soc_dai_ops { snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *, struct snd_soc_dai *); + /* + * Format list for auto selection. + * Format will be increased if priority format was + * not selected. 
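A hypothetical DAI advertising its capabilities through this table would combine the SND_SOC_POSSIBLE_DAIFMT_* bits defined earlier; each additional array entry supplies a lower-priority fallback set:

static u64 demo_dai_formats[] = {
	SND_SOC_POSSIBLE_DAIFMT_I2S	|
	SND_SOC_POSSIBLE_DAIFMT_LEFT_J	|
	SND_SOC_POSSIBLE_DAIFMT_NB_NF	|
	SND_SOC_POSSIBLE_DAIFMT_CBC_CFC,
};

static const struct snd_soc_dai_ops demo_dai_ops = {
	.auto_selectable_formats	= demo_dai_formats,
	.num_auto_selectable_formats	= ARRAY_SIZE(demo_dai_formats),
};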
+ * see + * snd_soc_dai_get_fmt() + */ + u64 *auto_selectable_formats; + int num_auto_selectable_formats; + /* bit field */ unsigned int no_capture_mute:1; }; diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h index 328cf763d9..4afd667e12 100644 --- a/include/sound/soc-topology.h +++ b/include/sound/soc-topology.h @@ -54,7 +54,7 @@ struct snd_soc_dobj_control { /* dynamic widget object */ struct snd_soc_dobj_widget { - unsigned int kcontrol_type; /* kcontrol type: mixer, enum, bytes */ + unsigned int *kcontrol_type; /* kcontrol type: mixer, enum, bytes */ }; /* generic dynamic object - all dynamic objects belong to this struct */ diff --git a/include/sound/soc.h b/include/sound/soc.h index e746da9963..675849d072 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1232,10 +1232,23 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, const char *propname); int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname); -unsigned int snd_soc_of_parse_daifmt(struct device_node *np, - const char *prefix, - struct device_node **bitclkmaster, - struct device_node **framemaster); + +unsigned int snd_soc_daifmt_clock_provider_fliped(unsigned int dai_fmt); +unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame); + +unsigned int snd_soc_daifmt_parse_format(struct device_node *np, const char *prefix); +unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np, + const char *prefix, + struct device_node **bitclkmaster, + struct device_node **framemaster); +#define snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix) \ + snd_soc_daifmt_parse_clock_provider_raw(np, prefix, NULL, NULL) +#define snd_soc_daifmt_parse_clock_provider_as_phandle \ + snd_soc_daifmt_parse_clock_provider_raw +#define snd_soc_daifmt_parse_clock_provider_as_flag(np, prefix) \ + snd_soc_daifmt_clock_provider_from_bitmap( \ + snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix)) + int snd_soc_get_dai_id(struct device_node *ep); int snd_soc_get_dai_name(const struct of_phandle_args *args, const char **dai_name); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index d1f7d2a453..85c16c266e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -326,6 +326,7 @@ struct t10_wwn { char model[INQUIRY_MODEL_LEN + 1]; char revision[INQUIRY_REVISION_LEN + 1]; char unit_serial[INQUIRY_VPD_SERIAL_LEN]; + u32 company_id; spinlock_t t10_vpd_lock; struct se_device *t10_dev; struct config_group t10_wwn_group; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index a41dd8a0c7..b671b1f2ce 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -99,8 +99,7 @@ struct btrfs_space_info; EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ - EM( COMMIT_TRANS, "COMMIT_TRANS") \ - EMe(FORCE_COMMIT_TRANS, "FORCE_COMMIT_TRANS") + EMe(COMMIT_TRANS, "COMMIT_TRANS") /* * First define the enums in the above macros to be exported to userspace via @@ -654,34 +653,30 @@ DEFINE_EVENT(btrfs__writepage, __extent_writepage, TRACE_EVENT(btrfs_writepage_end_io_hook, - TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate), + TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 end, + int uptodate), - TP_ARGS(page, start, end, uptodate), + TP_ARGS(inode, start, end, uptodate), TP_STRUCT__entry_btrfs( 
__field( u64, ino ) - __field( unsigned long, index ) __field( u64, start ) __field( u64, end ) __field( int, uptodate ) __field( u64, root_objectid ) ), - TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb), - __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host)); - __entry->index = page->index; + TP_fast_assign_btrfs(inode->root->fs_info, + __entry->ino = btrfs_ino(inode); __entry->start = start; __entry->end = end; __entry->uptodate = uptodate; - __entry->root_objectid = - BTRFS_I(page->mapping->host)->root->root_key.objectid; + __entry->root_objectid = inode->root->root_key.objectid; ), - TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu " - "end=%llu uptodate=%d", + TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d", show_root_type(__entry->root_objectid), - __entry->ino, __entry->index, - __entry->start, + __entry->ino, __entry->start, __entry->end, __entry->uptodate) ); @@ -1097,7 +1092,7 @@ TRACE_EVENT(btrfs_trigger_flush, __entry->flags = flags; __entry->bytes = bytes; __entry->flush = flush; - __assign_str(reason, reason) + __assign_str(reason, reason); ), TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu", diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index c3d354702c..3d708dae15 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class, __entry->align = align; ), - TP_printk("name=%s pfn=%lx page=%p count=%lu align=%u", + TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u", __get_str(name), __entry->pfn, __entry->page, @@ -60,7 +60,7 @@ TRACE_EVENT(cma_release, __entry->count = count; ), - TP_printk("name=%s pfn=%lx page=%p count=%lu", + TP_printk("name=%s pfn=0x%lx page=%p count=%lu", __get_str(name), __entry->pfn, __entry->page, diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 64e92d56c6..3963e79ca7 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -23,8 +23,8 @@ DECLARE_EVENT_CLASS(dma_fence, ), TP_fast_assign( - __assign_str(driver, fence->ops->get_driver_name(fence)) - __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __assign_str(driver, fence->ops->get_driver_name(fence)); + __assign_str(timeline, fence->ops->get_timeline_name(fence)); __entry->context = fence->context; __entry->seqno = fence->seqno; ), diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h index 796053e162..c47b63db12 100644 --- a/include/trace/events/filemap.h +++ b/include/trace/events/filemap.h @@ -36,7 +36,7 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache, __entry->s_dev = page->mapping->host->i_rdev; ), - TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu", + TP_printk("dev %d:%d ino %lx page=%p pfn=0x%lx ofs=%lu", MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino, pfn_to_page(__entry->pfn), diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h index d233f29165..e5c1ca6d16 100644 --- a/include/trace/events/intel_iommu.h +++ b/include/trace/events/intel_iommu.h @@ -15,6 +15,8 @@ #include <linux/tracepoint.h> #include <linux/intel-iommu.h> +#define MSG_MAX 256 + TRACE_EVENT(qi_submit, TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3), TP_ARGS(iommu, qw0, qw1, qw2, qw3), @@ -51,6 +53,41 @@ TRACE_EVENT(qi_submit, __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3 ) ); + +TRACE_EVENT(prq_report, + TP_PROTO(struct intel_iommu *iommu, struct device *dev, + u64 dw0, u64 dw1, u64 dw2, u64 dw3, + unsigned long seq), + + TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3,
seq), + + TP_STRUCT__entry( + __field(u64, dw0) + __field(u64, dw1) + __field(u64, dw2) + __field(u64, dw3) + __field(unsigned long, seq) + __string(iommu, iommu->name) + __string(dev, dev_name(dev)) + __dynamic_array(char, buff, MSG_MAX) + ), + + TP_fast_assign( + __entry->dw0 = dw0; + __entry->dw1 = dw1; + __entry->dw2 = dw2; + __entry->dw3 = dw3; + __entry->seq = seq; + __assign_str(iommu, iommu->name); + __assign_str(dev, dev_name(dev)); + ), + + TP_printk("%s/%s seq# %ld: %s", + __get_str(iommu), __get_str(dev), __entry->seq, + decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0, + __entry->dw1, __entry->dw2, __entry->dw3) + ) +); #endif /* _TRACE_INTEL_IOMMU_H */ /* This part must be outside protection */ diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index abb8b24744..e4e44a2b4a 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -12,11 +12,11 @@ struct io_wq_work; /** * io_uring_create - called after a new io_uring context was prepared * - * @fd: corresponding file descriptor - * @ctx: pointer to a ring context structure + * @fd: corresponding file descriptor + * @ctx: pointer to a ring context structure * @sq_entries: actual SQ size * @cq_entries: actual CQ size - * @flags: SQ ring flags, provided to io_uring_setup(2) + * @flags: SQ ring flags, provided to io_uring_setup(2) * * Allows to trace io_uring creation and provide pointer to a context, that can * be used later to find correlated events. @@ -52,12 +52,12 @@ TRACE_EVENT(io_uring_create, * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * - * @ctx: pointer to a ring context structure - * @opcode: describes which operation to perform + * @ctx: pointer to a ring context structure + * @opcode: describes which operation to perform * @nr_user_files: number of registered files * @nr_user_bufs: number of registered buffers * @cq_ev_fd: whether eventfs registered or not - * @ret: return code + * @ret: return code * * Allows to trace fixed files/buffers/eventfds, that could be registered to * avoid an overhead of getting references to them for every operation. 
This @@ -142,16 +142,16 @@ TRACE_EVENT(io_uring_queue_async_work, TP_ARGS(ctx, rw, req, work, flags), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( int, rw ) - __field( void *, req ) + __field( void *, ctx ) + __field( int, rw ) + __field( void *, req ) __field( struct io_wq_work *, work ) __field( unsigned int, flags ) ), TP_fast_assign( __entry->ctx = ctx; - __entry->rw = rw; + __entry->rw = rw; __entry->req = req; __entry->work = work; __entry->flags = flags; @@ -196,10 +196,10 @@ TRACE_EVENT(io_uring_defer, /** * io_uring_link - called before the io_uring request added into link_list of - * another request + * another request * - * @ctx: pointer to a ring context structure - * @req: pointer to a linked request + * @ctx: pointer to a ring context structure + * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * * Allows to track linked requests, to understand dependencies between requests @@ -212,8 +212,8 @@ TRACE_EVENT(io_uring_link, TP_ARGS(ctx, req, target_req), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) + __field( void *, ctx ) + __field( void *, req ) __field( void *, target_req ) ), @@ -244,7 +244,7 @@ TRACE_EVENT(io_uring_cqring_wait, TP_ARGS(ctx, min_events), TP_STRUCT__entry ( - __field( void *, ctx ) + __field( void *, ctx ) __field( int, min_events ) ), @@ -272,7 +272,7 @@ TRACE_EVENT(io_uring_fail_link, TP_ARGS(req, link), TP_STRUCT__entry ( - __field( void *, req ) + __field( void *, req ) __field( void *, link ) ), @@ -318,13 +318,14 @@ TRACE_EVENT(io_uring_complete, __entry->res, __entry->cflags) ); - /** * io_uring_submit_sqe - called before submitting one SQE * * @ctx: pointer to a ring context structure + * @req: pointer to a submitted request * @opcode: opcode of request * @user_data: user data associated with the request + * @flags request flags * @force_nonblock: whether a context blocking or not * @sq_thread: true if sq_thread has submitted this SQE * @@ -333,41 +334,60 @@ TRACE_EVENT(io_uring_complete, */ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock, - bool sq_thread), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags, + bool force_nonblock, bool sq_thread), - TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread), + TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) + __field( u32, flags ) __field( bool, force_nonblock ) __field( bool, sq_thread ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; + __entry->flags = flags; __entry->force_nonblock = force_nonblock; __entry->sq_thread = sq_thread; ), - TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->force_nonblock, __entry->sq_thread) + TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, " + "non block %d, sq_thread %d", __entry->ctx, __entry->req, + __entry->opcode, (unsigned long long)__entry->user_data, + __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); +/* + * io_uring_poll_arm - called after arming a poll wait if successful + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * @mask: request poll 
events mask + * @events: registered events of interest + * + * Allows to track which fds are waiting for and what are the events of + * interest. + */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, + int mask, int events), - TP_ARGS(ctx, opcode, user_data, mask, events), + TP_ARGS(ctx, req, opcode, user_data, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) __field( int, mask ) @@ -376,16 +396,17 @@ TRACE_EVENT(io_uring_poll_arm, TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; __entry->mask = mask; __entry->events = events; ), - TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->mask, __entry->events) + TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask, __entry->events) ); TRACE_EVENT(io_uring_poll_wake, @@ -440,27 +461,40 @@ TRACE_EVENT(io_uring_task_add, __entry->mask) ); +/* + * io_uring_task_run - called when task_work_run() executes the poll events + * notification callbacks + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * + * Allows to track when notified poll events are processed + */ TRACE_EVENT(io_uring_task_run, - TP_PROTO(void *ctx, u8 opcode, u64 user_data), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data), - TP_ARGS(ctx, opcode, user_data), + TP_ARGS(ctx, req, opcode, user_data), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; ), - TP_printk("ring %p, op %d, data 0x%llx", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data) + TP_printk("ring %p, req %p, op %d, data 0x%llx", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data) ); #endif /* _TRACE_IO_URING_H */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index d16a32867f..a4dfe00598 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -394,6 +394,107 @@ TRACE_EVENT(jbd2_lock_buffer_stall, __entry->stall_ms) ); +DECLARE_EVENT_CLASS(jbd2_journal_shrink, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, + unsigned long count), + + TP_ARGS(journal, nr_to_scan, count), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, nr_to_scan) + __field(unsigned long, count) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->nr_to_scan = nr_to_scan; + __entry->count = count; + ), + + TP_printk("dev %d,%d nr_to_scan %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_to_scan, __entry->count) +); + +DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), + + TP_ARGS(journal, nr_to_scan, count) +); + +DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), + + TP_ARGS(journal, nr_to_scan, count) +); + 
+TRACE_EVENT(jbd2_shrink_scan_exit, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, + unsigned long nr_shrunk, unsigned long count), + + TP_ARGS(journal, nr_to_scan, nr_shrunk, count), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, nr_to_scan) + __field(unsigned long, nr_shrunk) + __field(unsigned long, count) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->nr_to_scan = nr_to_scan; + __entry->nr_shrunk = nr_shrunk; + __entry->count = count; + ), + + TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_to_scan, __entry->nr_shrunk, + __entry->count) +); + +TRACE_EVENT(jbd2_shrink_checkpoint_list, + + TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, + unsigned long nr_freed, unsigned long nr_scanned, + tid_t next_tid), + + TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, + nr_scanned, next_tid), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(tid_t, first_tid) + __field(tid_t, tid) + __field(tid_t, last_tid) + __field(unsigned long, nr_freed) + __field(unsigned long, nr_scanned) + __field(tid_t, next_tid) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->first_tid = first_tid; + __entry->tid = tid; + __entry->last_tid = last_tid; + __entry->nr_freed = nr_freed; + __entry->nr_scanned = nr_scanned; + __entry->next_tid = next_tid; + ), + + TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " + "scanned %lu next transaction %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->first_tid, __entry->tid, __entry->last_tid, + __entry->nr_freed, __entry->nr_scanned, __entry->next_tid) +); + #endif /* _TRACE_JBD2_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 829a75692c..ddc8c944f4 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -173,7 +173,7 @@ TRACE_EVENT(mm_page_free, __entry->order = order; ), - TP_printk("page=%p pfn=%lu order=%d", + TP_printk("page=%p pfn=0x%lx order=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->order) @@ -193,7 +193,7 @@ TRACE_EVENT(mm_page_free_batched, __entry->pfn = page_to_pfn(page); ), - TP_printk("page=%p pfn=%lu order=0", + TP_printk("page=%p pfn=0x%lx order=0", pfn_to_page(__entry->pfn), __entry->pfn) ); @@ -219,7 +219,7 @@ TRACE_EVENT(mm_page_alloc, __entry->migratetype = migratetype; ), - TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", + TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s", __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL, __entry->pfn != -1UL ? __entry->pfn : 0, __entry->order, @@ -245,7 +245,7 @@ DECLARE_EVENT_CLASS(mm_page, __entry->migratetype = migratetype; ), - TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", + TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d", __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL, __entry->pfn != -1UL ? 
__entry->pfn : 0, __entry->order, @@ -278,7 +278,7 @@ TRACE_EVENT(mm_page_pcpu_drain, __entry->migratetype = migratetype; ), - TP_printk("page=%p pfn=%lu order=%d migratetype=%d", + TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->order, __entry->migratetype) ); @@ -312,7 +312,7 @@ TRACE_EVENT(mm_page_alloc_extfrag, get_pageblock_migratetype(page)); ), - TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", + TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->alloc_order, diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 629c7a0eaf..390270e00a 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -85,6 +85,12 @@ #define IF_HAVE_PG_ARCH_2(flag,string) #endif +#ifdef CONFIG_KASAN_HW_TAGS +#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) +#endif + #define __def_pageflag_names \ {1UL << PG_locked, "locked" }, \ {1UL << PG_waiters, "waiters" }, \ @@ -112,7 +118,8 @@ IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ IF_HAVE_PG_IDLE(PG_young, "young" ) \ IF_HAVE_PG_IDLE(PG_idle, "idle" ) \ -IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) +IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \ +IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") #define show_page_flags(flags) \ (flags) ? __print_flags(flags, "|", \ diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h index 775a46d0b0..6bf43176f1 100644 --- a/include/trace/events/mptcp.h +++ b/include/trace/events/mptcp.h @@ -73,6 +73,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __field(u64, data_seq) __field(u32, subflow_seq) __field(u16, data_len) + __field(u16, csum) __field(u8, use_map) __field(u8, dsn64) __field(u8, data_fin) @@ -82,6 +83,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __field(u8, frozen) __field(u8, reset_transient) __field(u8, reset_reason) + __field(u8, csum_reqd) ), TP_fast_assign( @@ -89,6 +91,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __entry->data_seq = mpext->data_seq; __entry->subflow_seq = mpext->subflow_seq; __entry->data_len = mpext->data_len; + __entry->csum = (__force u16)mpext->csum; __entry->use_map = mpext->use_map; __entry->dsn64 = mpext->dsn64; __entry->data_fin = mpext->data_fin; @@ -98,16 +101,18 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __entry->frozen = mpext->frozen; __entry->reset_transient = mpext->reset_transient; __entry->reset_reason = mpext->reset_reason; + __entry->csum_reqd = mpext->csum_reqd; ), - TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u", + TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u", __entry->data_ack, __entry->data_seq, __entry->subflow_seq, __entry->data_len, - __entry->use_map, __entry->dsn64, - __entry->data_fin, __entry->use_ack, - __entry->ack64, __entry->mpc_map, - __entry->frozen, __entry->reset_transient, - __entry->reset_reason) + __entry->csum, __entry->use_map, + __entry->dsn64, 
__entry->data_fin, + __entry->use_ack, __entry->ack64, + __entry->mpc_map, __entry->frozen, + __entry->reset_transient, __entry->reset_reason, + __entry->csum_reqd) ); DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status, diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h new file mode 100644 index 0000000000..82f741ec0f --- /dev/null +++ b/include/trace/events/osnoise.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM osnoise + +#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _OSNOISE_TRACE_H + +#include <linux/tracepoint.h> +TRACE_EVENT(thread_noise, + + TP_PROTO(struct task_struct *t, u64 start, u64 duration), + + TP_ARGS(t, start, duration), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN) + __field( u64, start ) + __field( u64, duration) + __field( pid_t, pid ) + ), + + TP_fast_assign( + memcpy(__entry->comm, t->comm, TASK_COMM_LEN); + __entry->pid = t->pid; + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("%8s:%d start %llu.%09u duration %llu ns", + __entry->comm, + __entry->pid, + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(softirq_noise, + + TP_PROTO(int vector, u64 start, u64 duration), + + TP_ARGS(vector, start, duration), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + __field( int, vector ) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("%8s:%d start %llu.%09u duration %llu ns", + show_softirq_name(__entry->vector), + __entry->vector, + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(irq_noise, + + TP_PROTO(int vector, const char *desc, u64 start, u64 duration), + + TP_ARGS(vector, desc, start, duration), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + __string( desc, desc ) + __field( int, vector ) + + ), + + TP_fast_assign( + __assign_str(desc, desc); + __entry->vector = vector; + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("%s:%d start %llu.%09u duration %llu ns", + __get_str(desc), + __entry->vector, + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(nmi_noise, + + TP_PROTO(u64 start, u64 duration), + + TP_ARGS(start, duration), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + ), + + TP_fast_assign( + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("start %llu.%09u duration %llu ns", + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(sample_threshold, + + TP_PROTO(u64 start, u64 duration, u64 interference), + + TP_ARGS(start, duration, interference), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + __field( u64, interference) + ), + + TP_fast_assign( + __entry->start = start; + __entry->duration = duration; + __entry->interference = interference; + ), + + TP_printk("start %llu.%09u duration %llu ns interference %llu", + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration, + __entry->interference) +); + +#endif /* _TRACE_OSNOISE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h index
ad0aa7f316..ca53450115 100644 --- a/include/trace/events/page_pool.h +++ b/include/trace/events/page_pool.h @@ -60,7 +60,7 @@ TRACE_EVENT(page_pool_state_release, __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p pfn=%lu release=%u", + TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u", __entry->pool, __entry->page, __entry->pfn, __entry->release) ); @@ -85,7 +85,7 @@ TRACE_EVENT(page_pool_state_hold, __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p pfn=%lu hold=%u", + TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u", __entry->pool, __entry->page, __entry->pfn, __entry->hold) ); diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h index e1735fe7c7..1d28431e85 100644 --- a/include/trace/events/pagemap.h +++ b/include/trace/events/pagemap.h @@ -46,7 +46,7 @@ TRACE_EVENT(mm_lru_insertion, ), /* Flag format is based on page-types.c formatting for pagemap */ - TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s", + TP_printk("page=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s", __entry->page, __entry->pfn, __entry->lru, @@ -75,7 +75,7 @@ TRACE_EVENT(mm_lru_activate, ), /* Flag format is based on page-types.c formatting for pagemap */ - TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn) + TP_printk("page=%p pfn=0x%lx", __entry->page, __entry->pfn) ); diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 6768b64bc7..670e41783e 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock, * "WakeNot": Don't wake rcuo kthread. * "WakeNotPoll": Don't wake rcuo kthread because it is polling. * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge. + * "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended. * "WokeEmpty": rcuo CB kthread woke to find empty list. 
*/ TRACE_EVENT_RCU(rcu_nocb_wake, diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index ffdbe6f85d..b2a2672e66 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -152,7 +152,7 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class, TP_fast_assign( __entry->cred = gc; __entry->service = gc->gc_service; - __assign_str(principal, gc->gc_principal) + __assign_str(principal, gc->gc_principal); ), TP_printk("cred=%p service=%s principal='%s'", @@ -535,7 +535,7 @@ TRACE_EVENT(rpcgss_upcall_msg, ), TP_fast_assign( - __assign_str(msg, buf) + __assign_str(msg, buf); ), TP_printk("msg='%s'", __get_str(msg)) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 1eca2305ca..94640482cf 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) - __field( int, success ) __field( int, target_cpu ) ), @@ -156,7 +155,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ - __entry->success = 1; /* rudiment, kill when possible */ __entry->target_cpu = task_cpu(p); ), diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index f624969a4f..370ade0d40 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -124,50 +124,6 @@ scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \ scsi_hostbyte_name(DID_TRANSPORT_FAILFAST)) -#define scsi_driverbyte_name(result) { result, #result } -#define show_driverbyte_name(val) \ - __print_symbolic(val, \ - scsi_driverbyte_name(DRIVER_OK), \ - scsi_driverbyte_name(DRIVER_BUSY), \ - scsi_driverbyte_name(DRIVER_SOFT), \ - scsi_driverbyte_name(DRIVER_MEDIA), \ - scsi_driverbyte_name(DRIVER_ERROR), \ - scsi_driverbyte_name(DRIVER_INVALID), \ - scsi_driverbyte_name(DRIVER_TIMEOUT), \ - scsi_driverbyte_name(DRIVER_HARD), \ - scsi_driverbyte_name(DRIVER_SENSE)) - -#define scsi_msgbyte_name(result) { result, #result } -#define show_msgbyte_name(val) \ - __print_symbolic(val, \ - scsi_msgbyte_name(COMMAND_COMPLETE), \ - scsi_msgbyte_name(EXTENDED_MESSAGE), \ - scsi_msgbyte_name(SAVE_POINTERS), \ - scsi_msgbyte_name(RESTORE_POINTERS), \ - scsi_msgbyte_name(DISCONNECT), \ - scsi_msgbyte_name(INITIATOR_ERROR), \ - scsi_msgbyte_name(ABORT_TASK_SET), \ - scsi_msgbyte_name(MESSAGE_REJECT), \ - scsi_msgbyte_name(NOP), \ - scsi_msgbyte_name(MSG_PARITY_ERROR), \ - scsi_msgbyte_name(LINKED_CMD_COMPLETE), \ - scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \ - scsi_msgbyte_name(TARGET_RESET), \ - scsi_msgbyte_name(ABORT_TASK), \ - scsi_msgbyte_name(CLEAR_TASK_SET), \ - scsi_msgbyte_name(INITIATE_RECOVERY), \ - scsi_msgbyte_name(RELEASE_RECOVERY), \ - scsi_msgbyte_name(CLEAR_ACA), \ - scsi_msgbyte_name(LOGICAL_UNIT_RESET), \ - scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \ - scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \ - scsi_msgbyte_name(ORDERED_QUEUE_TAG), \ - scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \ - scsi_msgbyte_name(ACA), \ - scsi_msgbyte_name(QAS_REQUEST), \ - scsi_msgbyte_name(BUS_DEVICE_RESET), \ - scsi_msgbyte_name(ABORT)) - #define scsi_statusbyte_name(result) { result, #result } #define show_statusbyte_name(val) \ __print_symbolic(val, \ @@ -327,9 +283,9 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), 
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), - show_driverbyte_name(((__entry->result) >> 24) & 0xff), + "DRIVER_OK", show_hostbyte_name(((__entry->result) >> 16) & 0xff), - show_msgbyte_name(((__entry->result) >> 8) & 0xff), + "COMMAND_COMPLETE", show_statusbyte_name(__entry->result & 0xff)) ); diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index a966d4b5ab..12c3157827 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -201,6 +201,66 @@ TRACE_EVENT(inet_sock_set_state, show_tcp_state_name(__entry->newstate)) ); +TRACE_EVENT(inet_sk_error_report, + + TP_PROTO(const struct sock *sk), + + TP_ARGS(sk), + + TP_STRUCT__entry( + __field(int, error) + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + __field(__u16, protocol) + __array(__u8, saddr, 4) + __array(__u8, daddr, 4) + __array(__u8, saddr_v6, 16) + __array(__u8, daddr_v6, 16) + ), + + TP_fast_assign( + struct inet_sock *inet = inet_sk(sk); + struct in6_addr *pin6; + __be32 *p32; + + __entry->error = sk->sk_err; + __entry->family = sk->sk_family; + __entry->protocol = sk->sk_protocol; + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + + p32 = (__be32 *) __entry->saddr; + *p32 = inet->inet_saddr; + + p32 = (__be32 *) __entry->daddr; + *p32 = inet->inet_daddr; + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + pin6 = (struct in6_addr *)__entry->saddr_v6; + *pin6 = sk->sk_v6_rcv_saddr; + pin6 = (struct in6_addr *)__entry->daddr_v6; + *pin6 = sk->sk_v6_daddr; + } else +#endif + { + pin6 = (struct in6_addr *)__entry->saddr_v6; + ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); + pin6 = (struct in6_addr *)__entry->daddr_v6; + ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); + } + ), + + TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d", + show_family_name(__entry->family), + show_inet_protocol_name(__entry->protocol), + __entry->sport, __entry->dport, + __entry->saddr, __entry->daddr, + __entry->saddr_v6, __entry->daddr_v6, + __entry->error) +); + #endif /* _TRACE_SOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h index 0dd9171d2a..c0d9844bef 100644 --- a/include/trace/events/spi.h +++ b/include/trace/events/spi.h @@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy, ); +TRACE_EVENT(spi_setup, + TP_PROTO(struct spi_device *spi, int status), + TP_ARGS(spi, status), + + TP_STRUCT__entry( + __field(int, bus_num) + __field(int, chip_select) + __field(unsigned long, mode) + __field(unsigned int, bits_per_word) + __field(unsigned int, max_speed_hz) + __field(int, status) + ), + + TP_fast_assign( + __entry->bus_num = spi->controller->bus_num; + __entry->chip_select = spi->chip_select; + __entry->mode = spi->mode; + __entry->bits_per_word = spi->bits_per_word; + __entry->max_speed_hz = spi->max_speed_hz; + __entry->status = status; + ), + + TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d", + __entry->bus_num, __entry->chip_select, + (__entry->mode & SPI_MODE_X_MASK), + (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "", + (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "", + (__entry->mode & SPI_3WIRE) ? "3wire, " : "", + (__entry->mode & SPI_LOOP) ? 
"loopback, " : "", + __entry->bits_per_word, __entry->max_speed_hz, + __entry->status) +); + +TRACE_EVENT(spi_set_cs, + TP_PROTO(struct spi_device *spi, bool enable), + TP_ARGS(spi, enable), + + TP_STRUCT__entry( + __field(int, bus_num) + __field(int, chip_select) + __field(unsigned long, mode) + __field(bool, enable) + ), + + TP_fast_assign( + __entry->bus_num = spi->controller->bus_num; + __entry->chip_select = spi->chip_select; + __entry->mode = spi->mode; + __entry->enable = enable; + ), + + TP_printk("spi%d.%d %s%s", + __entry->bus_num, __entry->chip_select, + __entry->enable ? "activate" : "deactivate", + (__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "") +); + DECLARE_EVENT_CLASS(spi_message, TP_PROTO(struct spi_message *msg), diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index d02e01a27b..861f199896 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -154,8 +154,8 @@ TRACE_EVENT(rpc_clnt_new, __entry->client_id = clnt->cl_clid; __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); - __assign_str(program, program) - __assign_str(server, server) + __assign_str(program, program); + __assign_str(server, server); ), TP_printk("client=%u peer=[%s]:%s program=%s server=%s", @@ -180,8 +180,8 @@ TRACE_EVENT(rpc_clnt_new_err, TP_fast_assign( __entry->error = error; - __assign_str(program, program) - __assign_str(server, server) + __assign_str(program, program); + __assign_str(server, server); ), TP_printk("program=%s server=%s error=%d", @@ -284,8 +284,8 @@ TRACE_EVENT(rpc_request, __entry->client_id = task->tk_client->cl_clid; __entry->version = task->tk_client->cl_vers; __entry->async = RPC_IS_ASYNC(task); - __assign_str(progname, task->tk_client->cl_program->name) - __assign_str(procname, rpc_proc_name(task)) + __assign_str(progname, task->tk_client->cl_program->name); + __assign_str(procname, rpc_proc_name(task)); ), TP_printk("task:%u@%u %sv%d %s (%ssync)", @@ -494,10 +494,10 @@ DECLARE_EVENT_CLASS(rpc_reply_event, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); - __assign_str(progname, task->tk_client->cl_program->name) + __assign_str(progname, task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procname, rpc_proc_name(task)) - __assign_str(servername, task->tk_xprt->servername) + __assign_str(procname, rpc_proc_name(task)); + __assign_str(servername, task->tk_xprt->servername); ), TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s", @@ -622,8 +622,8 @@ TRACE_EVENT(rpc_stats_latency, __entry->task_id = task->tk_pid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); __entry->version = task->tk_client->cl_vers; - __assign_str(progname, task->tk_client->cl_program->name) - __assign_str(procname, rpc_proc_name(task)) + __assign_str(progname, task->tk_client->cl_program->name); + __assign_str(procname, rpc_proc_name(task)); __entry->backlog = ktime_to_us(backlog); __entry->rtt = ktime_to_us(rtt); __entry->execute = ktime_to_us(execute); @@ -669,15 +669,15 @@ TRACE_EVENT(rpc_xdr_overflow, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __assign_str(progname, - task->tk_client->cl_program->name) + task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name); } else { 
__entry->task_id = 0; __entry->client_id = 0; - __assign_str(progname, "unknown") + __assign_str(progname, "unknown"); __entry->version = 0; - __assign_str(procedure, "unknown") + __assign_str(procedure, "unknown"); } __entry->requested = requested; __entry->end = xdr->end; @@ -735,9 +735,9 @@ TRACE_EVENT(rpc_xdr_alignment, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __assign_str(progname, - task->tk_client->cl_program->name) + task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name); __entry->offset = offset; __entry->copied = copied; @@ -1107,9 +1107,9 @@ TRACE_EVENT(xprt_retransmit, __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->ntrans = rqst->rq_ntrans; __assign_str(progname, - task->tk_client->cl_program->name) + task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name); ), TP_printk( @@ -1842,7 +1842,7 @@ TRACE_EVENT(svc_xprt_accept, TP_fast_assign( __assign_str(addr, xprt->xpt_remotebuf); - __assign_str(protocol, xprt->xpt_class->xcl_name) + __assign_str(protocol, xprt->xpt_class->xcl_name); __assign_str(service, service); ), diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index ba94857eea..521059d8dc 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -295,6 +295,82 @@ TRACE_EVENT(tcp_probe, __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie) ); +#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \ + do { \ + const struct tcphdr *th = (const struct tcphdr *)skb->data; \ + struct sockaddr_in *v4 = (void *)__entry->saddr; \ + \ + v4->sin_family = AF_INET; \ + v4->sin_port = th->source; \ + v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \ + v4 = (void *)__entry->daddr; \ + v4->sin_family = AF_INET; \ + v4->sin_port = th->dest; \ + v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) + +#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \ + do { \ + const struct iphdr *iph = ip_hdr(skb); \ + \ + if (iph->version == 6) { \ + const struct tcphdr *th = (const struct tcphdr *)skb->data; \ + struct sockaddr_in6 *v6 = (void *)__entry->saddr; \ + \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = th->source; \ + v6->sin6_addr = ipv6_hdr(skb)->saddr; \ + v6 = (void *)__entry->daddr; \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = th->dest; \ + v6->sin6_addr = ipv6_hdr(skb)->daddr; \ + } else \ + TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \ + } while (0) + +#else + +#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \ + TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) + +#endif + +/* + * tcp event with only skb + */ +DECLARE_EVENT_CLASS(tcp_event_skb, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __field(const void *, skbaddr) + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + TP_STORE_ADDR_PORTS_SKB(__entry, skb); + ), + + TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr) +); + +DEFINE_EVENT(tcp_event_skb, tcp_bad_csum, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + #endif /* _TRACE_TCP_H */ /* This part must be outside protection 
*/ diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 1cb6f1afba..599739ee7b 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -246,6 +246,26 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init, int dev_state, int link_state), TP_ARGS(dev_name, err, usecs, dev_state, link_state)); +DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + TRACE_EVENT(ufshcd_command, TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, unsigned int tag, u32 doorbell, int transfer_len, u32 intr, diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 2070df6495..88faf2400e 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -330,7 +330,7 @@ TRACE_EVENT(mm_vmscan_writepage, page_is_file_lru(page)); ), - TP_printk("page=%p pfn=%lu flags=%s", + TP_printk("page=%p pfn=0x%lx flags=%s", pfn_to_page(__entry->pfn), __entry->pfn, show_reclaim_flags(__entry->reclaim_flags)) @@ -423,47 +423,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active, show_reclaim_flags(__entry->reclaim_flags)) ); -TRACE_EVENT(mm_vmscan_inactive_list_is_low, - - TP_PROTO(int nid, int reclaim_idx, - unsigned long total_inactive, unsigned long inactive, - unsigned long total_active, unsigned long active, - unsigned long ratio, int file), - - TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file), - - TP_STRUCT__entry( - __field(int, nid) - __field(int, reclaim_idx) - __field(unsigned long, total_inactive) - __field(unsigned long, inactive) - __field(unsigned long, total_active) - __field(unsigned long, active) - __field(unsigned long, ratio) - __field(int, reclaim_flags) - ), - - TP_fast_assign( - __entry->nid = nid; - __entry->reclaim_idx = reclaim_idx; - __entry->total_inactive = total_inactive; - __entry->inactive = inactive; - __entry->total_active = total_active; - __entry->active = active; - __entry->ratio = ratio; - __entry->reclaim_flags = trace_reclaim_flags(file) & - RECLAIM_WB_LRU; - ), - - TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s", - __entry->nid, - __entry->reclaim_idx, - __entry->total_inactive, __entry->inactive, - __entry->total_active, __entry->active, - __entry->ratio, - show_reclaim_flags(__entry->reclaim_flags)) -); - TRACE_EVENT(mm_vmscan_node_reclaim_begin, TP_PROTO(int nid, int order, gfp_t gfp_flags), diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h index 6782213778..d0b3f0ea9b 100644 --- a/include/trace/events/vsock_virtio_transport_common.h +++ b/include/trace/events/vsock_virtio_transport_common.h @@ -9,9 +9,12 @@ #include <linux/tracepoint.h> TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM); +TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET); #define
show_type(val) \ - __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }) + __print_symbolic(val, \ + { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \ + { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" }) TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 1efa463c49..840d1ba84c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -36,7 +36,8 @@ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \ - EMe(WB_REASON_FORKER_THREAD, "forker_thread") + EM( WB_REASON_FORKER_THREAD, "forker_thread") \ + EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush") WB_WORK_REASON diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index fcad3645a7..c40fc97f94 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -110,7 +110,11 @@ DECLARE_EVENT_CLASS(xdp_redirect_template, u32 ifindex = 0, map_index = index; if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex; + /* Just leave to_ifindex to 0 if do broadcast redirect, + * as tgt will be NULL. + */ + if (tgt) + ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex; } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { ifindex = index; map_index = 0; diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 8268bf747d..acc17194c1 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -358,6 +358,21 @@ TRACE_MAKE_SYSTEM_STR(); trace_print_hex_dump_seq(p, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) +#undef __print_ns_to_secs +#define __print_ns_to_secs(value) \ + ({ \ + u64 ____val = (u64)(value); \ + do_div(____val, NSEC_PER_SEC); \ + ____val; \ + }) + +#undef __print_ns_without_secs +#define __print_ns_without_secs(value) \ + ({ \ + u64 ____val = (u64)(value); \ + (u32) do_div(____val, NSEC_PER_SEC); \ + }) + #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace enum print_line_t \ @@ -736,6 +751,16 @@ static inline void ftrace_test_probe_##call(void) \ #undef __print_array #undef __print_hex_dump +/* + * The below is not executed in the kernel. It is only what is + * displayed in the print format for userspace to parse. + */ +#undef __print_ns_to_secs +#define __print_ns_to_secs(val) (val) / 1000000000UL + +#undef __print_ns_without_secs +#define __print_ns_without_secs(val) (val) % 1000000000UL + #undef TP_printk #define TP_printk(fmt, args...) 
"\"" fmt "\", " __stringify(args) diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index f94f65d429..1567a3294c 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -72,6 +72,9 @@ #define MADV_COLD 20 /* deactivate these pages */ #define MADV_PAGEOUT 21 /* reclaim these pages */ +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 4dcd13d097..d588c244ec 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -122,6 +122,8 @@ #define SO_PREFER_BUSY_POLL 69 #define SO_BUSY_POLL_BUDGET 70 +#define SO_NETNS_COOKIE 71 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index d2a942086f..a9d6fcd95f 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -863,7 +863,8 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise) __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2) #define __NR_mount_setattr 442 __SYSCALL(__NR_mount_setattr, sys_mount_setattr) -/* 443 is reserved for quotactl_path */ +#define __NR_quotactl_fd 443 +__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd) #define __NR_landlock_create_ruleset 444 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset) @@ -872,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) #define __NR_landlock_restrict_self 446 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) +#ifdef __ARCH_WANT_MEMFD_SECRET +#define __NR_memfd_secret 447 +__SYSCALL(__NR_memfd_secret, sys_memfd_secret) +#endif + #undef __NR_syscalls -#define __NR_syscalls 447 +#define __NR_syscalls 448 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 728566542f..0cbd1540ae 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -116,8 +116,6 @@ extern "C" { #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) /* Flag that the memory should be in VRAM and cleared */ #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) -/* Flag that create shadow bo(GTT) while allocating vram bo */ -#define AMDGPU_GEM_CREATE_SHADOW (1 << 4) /* Flag that allocating the BO should use linear VRAM */ #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) /* Flag that BO is always valid in this VM */ @@ -138,6 +136,10 @@ extern "C" { * accessing it with various hw blocks */ #define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10) +/* Flag that BO will be used only in preemptible context, which does + * not require GTT memory accounting + */ +#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11) struct drm_amdgpu_gem_create_in { /** the requested memory size */ @@ -755,6 +757,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VBIOS_SIZE 0x1 /* Subquery id: Query vbios image */ #define AMDGPU_INFO_VBIOS_IMAGE 0x2 + /* Subquery id: Query vbios info */ + #define AMDGPU_INFO_VBIOS_INFO 0x3 /* Query UVD handles */ #define AMDGPU_INFO_NUM_HANDLES 0x1C /* Query sensor related information */ @@ -948,6 +952,15 @@ struct drm_amdgpu_info_firmware { __u32 feature; }; +struct drm_amdgpu_info_vbios { + __u8 name[64]; + __u8 vbios_pn[64]; + __u32 version; + __u32 
pad; + __u8 vbios_ver_str[32]; + __u8 date[32]; +}; + #define AMDGPU_VRAM_TYPE_UNKNOWN 0 #define AMDGPU_VRAM_TYPE_GDDR1 1 #define AMDGPU_VRAM_TYPE_DDR2 2 @@ -1121,6 +1134,7 @@ struct drm_amdgpu_info_video_caps { #define AMDGPU_FAMILY_RV 142 /* Raven */ #define AMDGPU_FAMILY_NV 143 /* Navi10 */ #define AMDGPU_FAMILY_VGH 144 /* Van Gogh */ +#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */ #if defined(__cplusplus) } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 67b94bc3c8..d043752a74 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -777,9 +777,12 @@ struct drm_get_cap { /** * DRM_CLIENT_CAP_STEREO_3D * - * if set to 1, the DRM core will expose the stereo 3D capabilities of the + * If set to 1, the DRM core will expose the stereo 3D capabilities of the * monitor by advertising the supported 3D layouts in the flags of struct - * drm_mode_modeinfo. + * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 3.13. */ #define DRM_CLIENT_CAP_STEREO_3D 1 @@ -788,6 +791,9 @@ struct drm_get_cap { * * If set to 1, the DRM core will expose all planes (overlay, primary, and * cursor) to userspace. + * + * This capability has been introduced in kernel version 3.15. Starting from + * kernel version 3.17, this capability is always supported for all drivers. */ #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 @@ -797,6 +803,13 @@ struct drm_get_cap { * If set to 1, the DRM core will expose atomic properties to userspace. This * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and * &DRM_CLIENT_CAP_ASPECT_RATIO. + * + * If the driver doesn't support atomic mode-setting, enabling this capability + * will fail with -EOPNOTSUPP. + * + * This capability has been introduced in kernel version 4.0. Starting from + * kernel version 4.2, this capability is always supported for atomic-capable + * drivers. */ #define DRM_CLIENT_CAP_ATOMIC 3 @@ -804,6 +817,10 @@ struct drm_get_cap { * DRM_CLIENT_CAP_ASPECT_RATIO * * If set to 1, the DRM core will provide aspect ratio information in modes. + * See ``DRM_MODE_FLAG_PIC_AR_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 4.18. */ #define DRM_CLIENT_CAP_ASPECT_RATIO 4 @@ -811,8 +828,11 @@ struct drm_get_cap { * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS * * If set to 1, the DRM core will expose special connectors to be used for - * writing back to memory the scene setup in the commit. Depends on client - * also supporting DRM_CLIENT_CAP_ATOMIC + * writing back to memory the scene setup in the commit. The client must enable + * &DRM_CLIENT_CAP_ATOMIC first. + * + * This capability is always supported for atomic-capable drivers starting from + * kernel version 4.19. 
*/ #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 07d073d220..d9a7e47ccb 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -168,6 +168,13 @@ extern "C" { #define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ #define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ +/* 64 bpp RGB */ +#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */ + +#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */ + /* * Floating point 64bpp RGB * IEEE 754-2008 binary16 half-precision float diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index a5e76aa06a..9b6722d45f 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -413,9 +413,10 @@ enum drm_mode_subconnector { * * **Force-probing a connector** * - * If the @count_modes field is set to zero, the kernel will perform a forced - * probe on the connector to refresh the connector status, modes and EDID. - * A forced-probe can be slow, might cause flickering and the ioctl will block. + * If the @count_modes field is set to zero and the DRM client is the current + * DRM master, the kernel will perform a forced probe on the connector to + * refresh the connector status, modes and EDID. A forced-probe can be slow, + * might cause flickering and the ioctl will block. * * User-space needs to force-probe connectors to ensure their metadata is * up-to-date at startup and after receiving a hot-plug event. User-space diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 09d0df8b71..af024d9045 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -74,6 +74,9 @@ struct drm_etnaviv_timespec { #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 #define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a #define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b +#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c +#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d +#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e #define ETNA_MAX_PIPES 4 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index ddc47bbf48..c2c7759b7d 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -62,8 +62,8 @@ extern "C" { #define I915_ERROR_UEVENT "ERROR" #define I915_RESET_UEVENT "RESET" -/* - * i915_user_extension: Base class for defining a chain of extensions +/** + * struct i915_user_extension - Base class for defining a chain of extensions * * Many interfaces need to grow over time. In most cases we can simply * extend the struct and have userspace pass in more data. Another option, @@ -76,12 +76,58 @@ extern "C" { * increasing complexity, and for large parts of that interface to be * entirely optional. The downside is more pointer chasing; chasing across * the __user boundary with pointers encapsulated inside u64. + * + * Example chaining: + * + * .. 
code-block:: C + * + * struct i915_user_extension ext3 { + * .next_extension = 0, // end + * .name = ..., + * }; + * struct i915_user_extension ext2 { + * .next_extension = (uintptr_t)&ext3, + * .name = ..., + * }; + * struct i915_user_extension ext1 { + * .next_extension = (uintptr_t)&ext2, + * .name = ..., + * }; + * + * Typically the struct i915_user_extension would be embedded in some uAPI + * struct, and in this case we would feed it the head of the chain(i.e ext1), + * which would then apply all of the above extensions. + * */ struct i915_user_extension { + /** + * @next_extension: + * + * Pointer to the next struct i915_user_extension, or zero if the end. + */ __u64 next_extension; + /** + * @name: Name of the extension. + * + * Note that the name here is just some integer. + * + * Also note that the name space for this is not global for the whole + * driver, but rather its scope/meaning is limited to the specific piece + * of uAPI which has embedded the struct i915_user_extension. + */ __u32 name; - __u32 flags; /* All undefined bits must be zero. */ - __u32 rsvd[4]; /* Reserved for future use; must be zero. */ + /** + * @flags: MBZ + * + * All undefined bits must be zero. + */ + __u32 flags; + /** + * @rsvd: MBZ + * + * Reserved for future use; must be zero. + */ + __u32 rsvd[4]; }; /* @@ -360,6 +406,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_QUERY 0x39 #define DRM_I915_GEM_VM_CREATE 0x3a #define DRM_I915_GEM_VM_DESTROY 0x3b +#define DRM_I915_GEM_CREATE_EXT 0x3c /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -392,6 +439,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) +#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext) #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) @@ -1054,12 +1102,12 @@ struct drm_i915_gem_exec_fence { __u32 flags; }; -/** +/* * See drm_i915_gem_execbuffer_ext_timeline_fences. */ #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 -/** +/* * This structure describes an array of drm_syncobj and associated points for * timeline variants of drm_syncobj. It is invalid to append this structure to * the execbuf if I915_EXEC_FENCE_ARRAY is set. @@ -1700,7 +1748,7 @@ struct drm_i915_gem_context_param { __u64 value; }; -/** +/* * Context SSEU programming * * It may be necessary for either functional or performance reason to configure @@ -2067,7 +2115,7 @@ struct drm_i915_perf_open_param { __u64 properties_ptr; }; -/** +/* * Enable data capture for a stream that was either opened in a disabled state * via I915_PERF_FLAG_DISABLED or was later disabled via * I915_PERF_IOCTL_DISABLE. @@ -2081,7 +2129,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) -/** +/* * Disable data capture for a stream. * * It is an error to try and read a stream that is disabled. 
@@ -2090,7 +2138,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) -/** +/* * Change metrics_set captured by a stream. * * If the stream is bound to a specific context, the configuration change @@ -2103,7 +2151,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) -/** +/* * Common to all i915 perf records */ struct drm_i915_perf_record_header { @@ -2151,7 +2199,7 @@ enum drm_i915_perf_record_type { DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; -/** +/* * Structure to upload perf dynamic configuration into the kernel. */ struct drm_i915_perf_oa_config { @@ -2172,53 +2220,95 @@ struct drm_i915_perf_oa_config { __u64 flex_regs_ptr; }; +/** + * struct drm_i915_query_item - An individual query for the kernel to process. + * + * The behaviour is determined by the @query_id. Note that exactly what + * @data_ptr is also depends on the specific @query_id. + */ struct drm_i915_query_item { + /** @query_id: The id for this query */ __u64 query_id; #define DRM_I915_QUERY_TOPOLOGY_INFO 1 #define DRM_I915_QUERY_ENGINE_INFO 2 #define DRM_I915_QUERY_PERF_CONFIG 3 +#define DRM_I915_QUERY_MEMORY_REGIONS 4 /* Must be kept compact -- no holes and well documented */ - /* + /** + * @length: + * * When set to zero by userspace, this is filled with the size of the - * data to be written at the data_ptr pointer. The kernel sets this + * data to be written at the @data_ptr pointer. The kernel sets this + * value to a negative value to signal an error on a particular query * item. */ __s32 length; - /* + /** + * @flags: + * * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. * * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the - * following : - * - DRM_I915_QUERY_PERF_CONFIG_LIST - * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID - * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * following: + * + * - DRM_I915_QUERY_PERF_CONFIG_LIST + * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID + * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID */ __u32 flags; #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 - /* - * Data will be written at the location pointed by data_ptr when the - * value of length matches the length of the data to be written by the + /** + * @data_ptr: + * + * Data will be written at the location pointed by @data_ptr when the + * value of @length matches the length of the data to be written by the * kernel. */ __u64 data_ptr; }; +/** + * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the + * kernel to fill out. + * + * Note that this is generally a two step process for each struct + * drm_i915_query_item in the array: + * + * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct + * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The + * kernel will then fill in the size, in bytes, which tells userspace how + * much memory it needs to allocate for the blob (say, for an array of + * properties). + * + * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the + * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that + * the &drm_i915_query_item.length should still be the same as what the + * kernel previously set. At this point the kernel can fill in the blob. + * + * Note that for some query items it can make sense for userspace to just pass + * in a buffer/blob equal to or larger than the required size. In this case only + * a single ioctl call is needed.
For some smaller query items this can work + * quite well. + * + */ struct drm_i915_query { + /** @num_items: The number of elements in the @items_ptr array */ __u32 num_items; - /* - * Unused for now. Must be cleared to zero. + /** + * @flags: Unused for now. Must be cleared to zero. */ __u32 flags; - /* - * This points to an array of num_items drm_i915_query_item structures. + /** + * @items_ptr: + * + * Pointer to an array of struct drm_i915_query_item. The number of + * array elements is @num_items. */ __u64 items_ptr; }; @@ -2292,21 +2382,21 @@ struct drm_i915_query_topology_info { * Describes one engine and its capabilities as known to the driver. */ struct drm_i915_engine_info { - /** Engine class and instance. */ + /** @engine: Engine class and instance. */ struct i915_engine_class_instance engine; - /** Reserved field. */ + /** @rsvd0: Reserved field. */ __u32 rsvd0; - /** Engine flags. */ + /** @flags: Engine flags. */ __u64 flags; - /** Capabilities of this engine. */ + /** @capabilities: Capabilities of this engine. */ __u64 capabilities; #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) - /** Reserved fields. */ + /** @rsvd1: Reserved fields. */ __u64 rsvd1[4]; }; @@ -2317,13 +2407,13 @@ struct drm_i915_engine_info { * an array of struct drm_i915_engine_info structures. */ struct drm_i915_query_engine_info { - /** Number of struct drm_i915_engine_info structs following. */ + /** @num_engines: Number of struct drm_i915_engine_info structs following. */ __u32 num_engines; - /** MBZ */ + /** @rsvd: MBZ */ __u32 rsvd[3]; - /** Marker for drm_i915_engine_info structures. */ + /** @engines: Marker for drm_i915_engine_info structures. */ struct drm_i915_engine_info engines[]; }; @@ -2377,6 +2467,241 @@ struct drm_i915_query_perf_config { __u8 data[]; }; +/** + * enum drm_i915_gem_memory_class - Supported memory classes + */ +enum drm_i915_gem_memory_class { + /** @I915_MEMORY_CLASS_SYSTEM: System memory */ + I915_MEMORY_CLASS_SYSTEM = 0, + /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */ + I915_MEMORY_CLASS_DEVICE, +}; + +/** + * struct drm_i915_gem_memory_class_instance - Identify particular memory region + */ +struct drm_i915_gem_memory_class_instance { + /** @memory_class: See enum drm_i915_gem_memory_class */ + __u16 memory_class; + + /** @memory_instance: Which instance */ + __u16 memory_instance; +}; + +/** + * struct drm_i915_memory_region_info - Describes one region as known to the + * driver. + * + * Note that we reserve some stuff here for potential future work. As an example + * we might want to expose the capabilities for a given region, which could include + * things like if the region is CPU mappable/accessible, what are the supported + * mapping types etc. + * + * Note that to extend struct drm_i915_memory_region_info and struct + * drm_i915_query_memory_regions in the future the plan is to do the following: + * + * .. code-block:: C + * + * struct drm_i915_memory_region_info { + * struct drm_i915_gem_memory_class_instance region; + * union { + * __u32 rsvd0; + * __u32 new_thing1; + * }; + * ... + * union { + * __u64 rsvd1[8]; + * struct { + * __u64 new_thing2; + * __u64 new_thing3; + * ... + * }; + * }; + * }; + * + * With this things should remain source compatible between versions for + * userspace, even as we add new fields. + * + * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS + * at &drm_i915_query_item.query_id. + */ +struct drm_i915_memory_region_info { + /** @region: The class:instance pair encoding */ + struct drm_i915_gem_memory_class_instance region; + + /** @rsvd0: MBZ */ + __u32 rsvd0; + + /** @probed_size: Memory probed by the driver (-1 = unknown) */ + __u64 probed_size; + + /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */ + __u64 unallocated_size; + + /** @rsvd1: MBZ */ + __u64 rsvd1[8]; +}; + +/** + * struct drm_i915_query_memory_regions + * + * The region info query enumerates all regions known to the driver by filling + * in an array of struct drm_i915_memory_region_info structures. + * + * Example for getting the list of supported regions: + * + * .. code-block:: C + * + * struct drm_i915_query_memory_regions *info; + * struct drm_i915_query_item item = { + * .query_id = DRM_I915_QUERY_MEMORY_REGIONS, + * }; + * struct drm_i915_query query = { + * .num_items = 1, + * .items_ptr = (uintptr_t)&item, + * }; + * int err, i; + * + * // First query the size of the blob we need, this needs to be large + * // enough to hold our array of regions. The kernel will fill out the + * // item.length for us, which is the number of bytes we need. + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * info = calloc(1, item.length); + * // Now that we allocated the required number of bytes, we call the ioctl + * // again, this time with the data_ptr pointing to our newly allocated + * // blob, which the kernel can then populate with all the region info. + * item.data_ptr = (uintptr_t)info; + * + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * // We can now access each region in the array + * for (i = 0; i < info->num_regions; i++) { + * struct drm_i915_memory_region_info mr = info->regions[i]; + * u16 class = mr.region.memory_class; + * u16 instance = mr.region.memory_instance; + * + * .... + * } + * + * free(info); + */ +struct drm_i915_query_memory_regions { + /** @num_regions: Number of supported regions */ + __u32 num_regions; + + /** @rsvd: MBZ */ + __u32 rsvd[3]; + + /** @regions: Info about each supported region */ + struct drm_i915_memory_region_info regions[]; +}; + +/** + * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added + * extension support using struct i915_user_extension. + * + * Note that in the future we want to have our buffer flags here, at least for + * the stuff that is immutable. Previously we would have two ioctls, one to + * create the object with gem_create, and another to apply various parameters, + * however this creates some ambiguity for the params which are considered + * immutable. Also in general we're phasing out the various SET/GET ioctls. + */ +struct drm_i915_gem_create_ext { + /** + * @size: Requested size for the object. + * + * The (page-aligned) allocated size for the object will be returned. + * + * Note that for some devices we might have further minimum + * page-size restrictions (larger than 4K), like for device local-memory. + * However in general the final size here should always reflect any + * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS + * extension to place the object in device local-memory. + */ + __u64 size; + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero.
+ */ + __u32 handle; + /** @flags: MBZ */ + __u32 flags; + /** + * @extensions: The chain of extensions to apply to this object. + * + * This will be useful in the future when we need to support several + * different extensions, and we need to apply more than one when + * creating the object. See struct i915_user_extension. + * + * If we don't supply any extensions then we get the same old gem_create + * behaviour. + * + * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see + * struct drm_i915_gem_create_ext_memory_regions. + */ +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 + __u64 extensions; +}; + +/** + * struct drm_i915_gem_create_ext_memory_regions - The + * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. + * + * Set the object with the desired set of placements/regions in priority + * order. Each entry must be unique and supported by the device. + * + * This is provided as an array of struct drm_i915_gem_memory_class_instance, or + * an equivalent layout of class:instance pair encodings. See struct + * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to + * query the supported regions for a device. + * + * As an example, on discrete devices, if we wish to set the placement as + * device local-memory we can do something like: + * + * .. code-block:: C + * + * struct drm_i915_gem_memory_class_instance region_lmem = { + * .memory_class = I915_MEMORY_CLASS_DEVICE, + * .memory_instance = 0, + * }; + * struct drm_i915_gem_create_ext_memory_regions regions = { + * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS }, + * .regions = (uintptr_t)&region_lmem, + * .num_regions = 1, + * }; + * struct drm_i915_gem_create_ext create_ext = { + * .size = 16 * PAGE_SIZE, + * .extensions = (uintptr_t)&regions, + * }; + * + * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); + * if (err) ... + * + * At which point we get the object handle in &drm_i915_gem_create_ext.handle, + * along with the final object size in &drm_i915_gem_create_ext.size, which + * should account for any rounding up, if required. + */ +struct drm_i915_gem_create_ext_memory_regions { + /** @base: Extension link. See struct i915_user_extension. */ + struct i915_user_extension base; + + /** @pad: MBZ */ + __u32 pad; + /** @num_regions: Number of elements in the @regions array. */ + __u32 num_regions; + /** + * @regions: The regions/placements array. + * + * An array of struct drm_i915_gem_memory_class_instance.
+ */ + __u64 regions; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 5596d7c37f..f075851021 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -94,13 +94,12 @@ struct drm_msm_param { /* cache modes */ #define MSM_BO_CACHED 0x00010000 #define MSM_BO_WC 0x00020000 -#define MSM_BO_UNCACHED 0x00040000 +#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */ +#define MSM_BO_CACHED_COHERENT 0x080000 #define MSM_BO_FLAGS (MSM_BO_SCANOUT | \ MSM_BO_GPU_READONLY | \ - MSM_BO_CACHED | \ - MSM_BO_WC | \ - MSM_BO_UNCACHED) + MSM_BO_CACHE_MASK) struct drm_msm_gem_new { __u64 size; /* in */ diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index ec19db1eea..061e700dd0 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -171,6 +171,7 @@ enum drm_panfrost_param { DRM_PANFROST_PARAM_JS_FEATURES15, DRM_PANFROST_PARAM_NR_CORE_GROUPS, DRM_PANFROST_PARAM_THREAD_TLS_ALLOC, + DRM_PANFROST_PARAM_AFBC_FEATURES, }; struct drm_panfrost_get_param { diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index cd2d8279a5..daa481729e 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -48,7 +48,7 @@ * 2500 - 2999 future user space (maybe integrity labels and related events) * * Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are - * exclusively user space. 1300-2099 is kernel --> user space + * exclusively user space. 1300-2099 is kernel --> user space * communication. */ #define AUDIT_GET 1000 /* Get status */ @@ -78,7 +78,7 @@ #define AUDIT_LAST_USER_MSG 1199 #define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */ #define AUDIT_LAST_USER_MSG2 2999 - + #define AUDIT_DAEMON_START 1200 /* Daemon startup record */ #define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */ #define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */ diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h index abe5f2b658..c7e502bf5a 100644 --- a/include/uapi/linux/auxvec.h +++ b/include/uapi/linux/auxvec.h @@ -33,5 +33,8 @@ #define AT_EXECFN 31 /* filename of program */ +#ifndef AT_MINSIGSTKSZ +#define AT_MINSIGSTKSZ 51 /* minimal stack size for signal delivery */ +#endif #endif /* _UAPI_LINUX_AUXVEC_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ec6d85a817..bf9252c738 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -527,6 +527,15 @@ union bpf_iter_link_info { * Look up an element with the given *key* in the map referred to * by the file descriptor *fd*, and if found, delete the element. * + * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map + * types, the *flags* argument needs to be set to 0, but for other + * map types, it may be specified as: + * + * **BPF_F_LOCK** + * Look up and delete the value of a spin-locked map + * without returning the lock. This must be specified if + * the elements contain a spinlock. + * * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types * implement this command as a "pop" operation, deleting the top * element rather than one corresponding to *key*. @@ -536,6 +545,10 @@ union bpf_iter_link_info { * This command is only valid for the following map types: * * **BPF_MAP_TYPE_QUEUE** * * **BPF_MAP_TYPE_STACK** + * * **BPF_MAP_TYPE_HASH** + * * **BPF_MAP_TYPE_PERCPU_HASH** + * * **BPF_MAP_TYPE_LRU_HASH** + * * **BPF_MAP_TYPE_LRU_PERCPU_HASH** * * Return * Returns zero on success. 
On error, -1 is returned and *errno* @@ -837,6 +850,7 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, + BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, @@ -937,6 +951,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, + BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ }; enum bpf_attach_type { @@ -979,6 +994,8 @@ enum bpf_attach_type { BPF_SK_LOOKUP, BPF_XDP, BPF_SK_SKB_VERDICT, + BPF_SK_REUSEPORT_SELECT, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, __MAX_BPF_ATTACH_TYPE }; @@ -1097,8 +1114,8 @@ enum bpf_link_type { /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * - * insn[0].src_reg: BPF_PSEUDO_MAP_FD - * insn[0].imm: map fd + * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX] + * insn[0].imm: map fd or fd_idx * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 @@ -1106,15 +1123,19 @@ enum bpf_link_type { * verifier type: CONST_PTR_TO_MAP */ #define BPF_PSEUDO_MAP_FD 1 -/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE - * insn[0].imm: map fd +#define BPF_PSEUDO_MAP_IDX 5 + +/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE + * insn[0].imm: map fd or fd_idx * insn[1].imm: offset into value * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of map[0]+offset * verifier type: PTR_TO_MAP_VALUE */ -#define BPF_PSEUDO_MAP_VALUE 2 +#define BPF_PSEUDO_MAP_VALUE 2 +#define BPF_PSEUDO_MAP_IDX_VALUE 6 + /* insn[0].src_reg: BPF_PSEUDO_BTF_ID * insn[0].imm: kernel btf id of VAR * insn[1].imm: 0 @@ -1314,6 +1335,8 @@ union bpf_attr { /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; + __u32 :32; /* pad */ + __aligned_u64 fd_array; /* array of FDs */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -2534,8 +2557,12 @@ union bpf_attr { * The lower two bits of *flags* are used as the return code if * the map lookup fails. This is so that the return value can be * one of the XDP program return codes up to **XDP_TX**, as chosen - * by the caller. Any higher bits in the *flags* argument must be - * unset. + * by the caller. The higher bits of *flags* can be set to + * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. + * + * With BPF_F_BROADCAST the packet will be broadcast to all the + * interfaces in the map, and with BPF_F_EXCLUDE_INGRESS the ingress + * interface will be excluded from the broadcast. * * See also **bpf_redirect**\ (), which only supports redirecting * to an ifindex, but doesn't require a map to do so. @@ -4735,6 +4762,24 @@ union bpf_attr { * be zero-terminated except when **str_size** is 0. * * Or **-EBUSY** if the per-CPU memory copy buffer is busy. + * + * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size) + * Description + * Execute bpf syscall with given arguments. + * Return + * A syscall result. + * + * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags) + * Description + * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs. + * Return + * Returns btf_id and btf_obj_fd in lower and upper 32 bits. + * + * long bpf_sys_close(u32 fd) + * Description + * Execute close syscall for given FD. + * Return + * A syscall result.
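+ *
+ * A minimal sketch of using the new helpers from inside a
+ * BPF_PROG_TYPE_SYSCALL program (attr layout as in union bpf_attr;
+ * illustrative only, error handling elided):
+ *
+ * .. code-block:: C
+ *
+ *	union bpf_attr attr = {
+ *		.map_type = BPF_MAP_TYPE_ARRAY,
+ *		.key_size = 4,
+ *		.value_size = 8,
+ *		.max_entries = 1,
+ *	};
+ *	// returns the syscall result, i.e. a map fd on success
+ *	long fd = bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ *
+ *	if (fd >= 0)
+ *		bpf_sys_close(fd);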
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -4903,6 +4948,9 @@ union bpf_attr { FN(check_mtu), \ FN(for_each_map_elem), \ FN(snprintf), \ + FN(sys_bpf), \ + FN(btf_find_by_name_kind), \ + FN(sys_close), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5080,6 +5128,12 @@ enum { BPF_F_BPRM_SECUREEXEC = (1ULL << 0), }; +/* Flags for bpf_redirect_map helper */ +enum { + BPF_F_BROADCAST = (1ULL << 3), + BPF_F_EXCLUDE_INGRESS = (1ULL << 4), +}; + #define __bpf_md_ptr(type, name) \ union { \ type name; \ @@ -5364,6 +5418,20 @@ struct sk_reuseport_md { __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ + /* When reuse->migrating_sk is NULL, it is selecting a sk for the + * new incoming connection request (e.g. selecting a listen sk for + * the received SYN in the TCP case). reuse->sk is one of the sk + * in the reuseport group. The bpf prog can use reuse->sk to learn + * the local listening ip/port without looking into the skb. + * + * When reuse->migrating_sk is not NULL, reuse->sk is closed and + * reuse->migrating_sk is the socket that needs to be migrated + * to another listening socket. migrating_sk could be a fullsock + * sk that is fully established or a reqsk that is in the middle + * of the 3-way handshake. + */ + __bpf_md_ptr(struct bpf_sock *, sk); + __bpf_md_ptr(struct bpf_sock *, migrating_sk); }; #define BPF_TAG_SIZE 8 diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 5df73001aa..22cd037123 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -154,7 +154,7 @@ struct btrfs_scrub_progress { __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */ __u64 read_errors; /* # of read errors encountered (EIO) */ __u64 csum_errors; /* # of failed csum checks */ - __u64 verify_errors; /* # of occurences, where the metadata + __u64 verify_errors; /* # of occurrences, where the metadata * of a tree block did not match the * expected values, like generation or * logical */ @@ -174,7 +174,7 @@ struct btrfs_scrub_progress { __u64 last_physical; /* last physical address scrubbed. In * case a scrub was aborted, this can * be used to restart the scrub */ - __u64 unverified_errors; /* # of occurences where a read for a + __u64 unverified_errors; /* # of occurrences where a read for a * full (64k) bio failed, but the re- * check succeeded for each 4k piece. * Intermittent error. */ diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index c7535352fe..90801ada2b 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -123,8 +123,8 @@ struct can_frame { /* * defined bits for canfd_frame.flags * - * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to - * be set in the CAN frame bitstream on the wire. The EDL bit switch turns + * The use of struct canfd_frame implies the FD Frame (FDF) bit to + * be set in the CAN frame bitstream on the wire. The FDF bit switch turns * the CAN controller's bitstream processor into the CAN FD mode which creates * two new options within the CAN FD frame specification: * @@ -135,9 +135,18 @@ struct can_frame { * controller only the CANFD_BRS bit is relevant for real CAN controllers when * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make * sense for virtual CAN interfaces to test applications with echoed frames.
+ * + * The struct can_frame and struct canfd_frame intentionally share the same + * layout to be able to write CAN frame content into a CAN FD frame structure. + * When this is done the former differentiation via CAN_MTU / CANFD_MTU gets + * lost. CANFD_FDF allows programmers to mark CAN FD frames in the case of + * using struct canfd_frame for mixed CAN / CAN FD content (dual use). + * N.B. the Kernel APIs do NOT provide mixed CAN / CAN FD content inside of + * struct canfd_frame; therefore the CANFD_FDF flag is disregarded by Linux. */ #define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */ #define CANFD_ESI 0x02 /* error state indicator of the transmitting node */ +#define CANFD_FDF 0x04 /* mark CAN FD for dual use of struct canfd_frame */ /** * struct canfd_frame - CAN flexible data rate frame structure diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 3155382dfc..f6e8a005b1 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -29,6 +29,18 @@ ___C(GET_LSA, "Get Label Storage Area"), \ ___C(GET_HEALTH_INFO, "Get Health Info"), \ ___C(GET_LOG, "Get Log"), \ + ___C(SET_PARTITION_INFO, "Set Partition Information"), \ + ___C(SET_LSA, "Set Label Storage Area"), \ + ___C(GET_ALERT_CONFIG, "Get Alert Configuration"), \ + ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \ + ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \ + ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \ + ___C(GET_POISON, "Get Poison List"), \ + ___C(INJECT_POISON, "Inject Poison"), \ + ___C(CLEAR_POISON, "Clear Poison"), \ + ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ + ___C(SCAN_MEDIA, "Scan Media"), \ + ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index f6008b2fa6..32f53a0069 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -126,6 +126,11 @@ enum devlink_command { DEVLINK_CMD_HEALTH_REPORTER_TEST, + DEVLINK_CMD_RATE_GET, /* can dump */ + DEVLINK_CMD_RATE_SET, + DEVLINK_CMD_RATE_NEW, + DEVLINK_CMD_RATE_DEL, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -206,6 +211,11 @@ enum devlink_port_flavour { */ }; +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF, + DEVLINK_RATE_TYPE_NODE, +}; + enum devlink_param_cmode { DEVLINK_PARAM_CMODE_RUNTIME, DEVLINK_PARAM_CMODE_DRIVERINIT, @@ -534,6 +544,13 @@ enum devlink_attr { DEVLINK_ATTR_RELOAD_ACTION_STATS, /* nested */ DEVLINK_ATTR_PORT_PCI_SF_NUMBER, /* u32 */ + + DEVLINK_ATTR_RATE_TYPE, /* u16 */ + DEVLINK_ATTR_RATE_TX_SHARE, /* u64 */ + DEVLINK_ATTR_RATE_TX_MAX, /* u64 */ + DEVLINK_ATTR_RATE_NODE_NAME, /* string */ + DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index cfef6b0816..67aa7134b3 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -233,7 +233,7 @@ enum tunable_id { ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ /* * Add your fresh new tunable attribute above and remember to update - * tunable_strings[] in net/core/ethtool.c + * tunable_strings[] in net/ethtool/common.c */ __ETHTOOL_TUNABLE_COUNT, }; @@ -297,7 +297,7 @@ enum phy_tunable_id { ETHTOOL_PHY_EDPD, /* * Add your fresh new phy tunable attribute above and remember to update - * 
phy_tunable_strings[] in net/core/ethtool.c + * phy_tunable_strings[] in net/ethtool/common.c */ __ETHTOOL_PHY_TUNABLE_COUNT, }; diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 825cfda1c5..b3b93710ef 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -46,6 +46,7 @@ enum { ETHTOOL_MSG_FEC_SET, ETHTOOL_MSG_MODULE_EEPROM_GET, ETHTOOL_MSG_STATS_GET, + ETHTOOL_MSG_PHC_VCLOCKS_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -88,6 +89,7 @@ enum { ETHTOOL_MSG_FEC_NTF, ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, ETHTOOL_MSG_STATS_GET_REPLY, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -440,6 +442,19 @@ enum { ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +/* PHC VCLOCKS */ + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC, + ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */ + ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */ + + /* add new constants above here */ + __ETHTOOL_A_PHC_VCLOCKS_CNT, + ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1) +}; + /* CABLE TEST */ enum { @@ -675,7 +690,7 @@ enum { ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */ ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */ ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */ - ETHTOOL_A_MODULE_EEPROM_DATA, /* nested */ + ETHTOOL_A_MODULE_EEPROM_DATA, /* binary */ __ETHTOOL_A_MODULE_EEPROM_CNT, ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1) diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 271ae90a9b..36ed092227 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -181,6 +181,9 @@ * - add FUSE_OPEN_KILL_SUIDGID * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT * - add FUSE_SETXATTR_ACL_KILL_SGID + * + * 7.34 + * - add FUSE_SYNCFS */ #ifndef _LINUX_FUSE_H @@ -216,7 +219,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 33 +#define FUSE_KERNEL_MINOR_VERSION 34 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -509,6 +512,7 @@ enum fuse_opcode { FUSE_COPY_FILE_RANGE = 47, FUSE_SETUPMAPPING = 48, FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -971,4 +975,8 @@ struct fuse_removemapping_one { #define FUSE_REMOVEMAPPING_MAX_ENTRY \ (PAGE_SIZE / sizeof(struct fuse_removemapping_one)) +struct fuse_syncfs_in { + uint64_t padding; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index a89eb0accd..235e5b2fac 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -21,6 +21,7 @@ #define FUTEX_WAKE_BITSET 10 #define FUTEX_WAIT_REQUEUE_PI 11 #define FUTEX_CMP_REQUEUE_PI 12 +#define FUTEX_LOCK_PI2 13 #define FUTEX_PRIVATE_FLAG 128 #define FUTEX_CLOCK_REALTIME 256 @@ -32,6 +33,7 @@ #define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG) #define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_LOCK_PI2_PRIVATE (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG) #define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG) diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h index c1da8244c5..163c0998ae 100644 --- 
a/include/uapi/linux/icmp.h +++ b/include/uapi/linux/icmp.h @@ -20,7 +20,6 @@ #include <linux/types.h> #include <asm/byteorder.h> -#include <linux/in.h> #include <linux/if.h> #include <linux/in6.h> @@ -154,7 +153,7 @@ struct icmp_ext_echo_iio { struct { struct icmp_ext_echo_ctype3_hdr ctype3_hdr; union { - struct in_addr ipv4_addr; + __be32 ipv4_addr; struct in6_addr ipv6_addr; } ip_addr; } addr; diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 13d59c51ef..6b56a75495 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -627,6 +627,8 @@ enum { MDBA_ROUTER_PATTR_UNSPEC, MDBA_ROUTER_PATTR_TIMER, MDBA_ROUTER_PATTR_TYPE, + MDBA_ROUTER_PATTR_INET_TIMER, + MDBA_ROUTER_PATTR_INET6_TIMER, __MDBA_ROUTER_PATTR_MAX }; #define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cd5b382a41..4882e81514 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -341,6 +341,13 @@ enum { IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, IFLA_PROTO_DOWN_REASON, + + /* device (sysfs) name as parent, used instead + * of IFLA_LINK where there's no parent netdev + */ + IFLA_PARENT_DEV_NAME, + IFLA_PARENT_DEV_BUS_NAME, + __IFLA_MAX }; @@ -1236,6 +1243,8 @@ enum { #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5) enum { IFLA_RMNET_UNSPEC, diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 162ff99ed2..79126d5cd2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -46,21 +46,17 @@ struct io_uring_sqe { __u32 unlink_flags; }; __u64 user_data; /* data to be passed back at completion time */ + /* pack this to avoid bogus arm OABI complaints */ union { - struct { - /* pack this to avoid bogus arm OABI complaints */ - union { - /* index into fixed buffers, if used */ - __u16 buf_index; - /* for grouped buffer selection */ - __u16 buf_group; - } __attribute__((packed)); - /* personality to use, if used */ - __u16 personality; - __s32 splice_fd_in; - }; - __u64 __pad2[3]; - }; + /* index into fixed buffers, if used */ + __u16 buf_index; + /* for grouped buffer selection */ + __u16 buf_group; + } __attribute__((packed)); + /* personality to use, if used */ + __u16 personality; + __s32 splice_fd_in; + __u64 __pad2[2]; }; enum { @@ -306,6 +302,10 @@ enum { IORING_REGISTER_BUFFERS2 = 15, IORING_REGISTER_BUFFERS_UPDATE = 16, + /* set/clear io-wq thread affinities */ + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + /* this goes last */ IORING_REGISTER_LAST }; diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index bf5e7d7846..3cb5b5dd9f 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -30,9 +30,10 @@ * - 1.1 - initial version * - 1.3 - Add SMI events support * - 1.4 - Indicate new SRAM EDC bit in device properties + * - 1.5 - Add SVM API */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 4 +#define KFD_IOCTL_MINOR_VERSION 5 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ @@ -473,6 +474,167 @@ enum kfd_mmio_remap { KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4, }; +/* Guarantee host access to memory */ +#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001 +/* Fine grained coherency between all devices with access */ +#define KFD_IOCTL_SVM_FLAG_COHERENT 
0x00000002 +/* Use any GPU in same hive as preferred device */ +#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004 +/* GPUs only read, allows replication */ +#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008 +/* Allow execution on GPU */ +#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010 +/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */ +#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 + +/** + * kfd_ioctl_svm_op - SVM ioctl operations + * + * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes + * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes + */ +enum kfd_ioctl_svm_op { + KFD_IOCTL_SVM_OP_SET_ATTR, + KFD_IOCTL_SVM_OP_GET_ATTR +}; + +/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations + * + * GPU IDs are used to specify GPUs as preferred and prefetch locations. + * Below definitions are used for system memory or for leaving the preferred + * location unspecified. + */ +enum kfd_ioctl_svm_location { + KFD_IOCTL_SVM_LOCATION_SYSMEM = 0, + KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff +}; + +/** + * kfd_ioctl_svm_attr_type - SVM attribute types + * + * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for + * system memory + * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for + * system memory. Setting this triggers an + * immediate prefetch (migration). + * @KFD_IOCTL_SVM_ATTR_ACCESS: + * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: + * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given + * by the attribute value + * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see + * KFD_IOCTL_SVM_FLAG_...) + * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear + * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity + * (log2 num pages) + */ +enum kfd_ioctl_svm_attr_type { + KFD_IOCTL_SVM_ATTR_PREFERRED_LOC, + KFD_IOCTL_SVM_ATTR_PREFETCH_LOC, + KFD_IOCTL_SVM_ATTR_ACCESS, + KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE, + KFD_IOCTL_SVM_ATTR_NO_ACCESS, + KFD_IOCTL_SVM_ATTR_SET_FLAGS, + KFD_IOCTL_SVM_ATTR_CLR_FLAGS, + KFD_IOCTL_SVM_ATTR_GRANULARITY +}; + +/** + * kfd_ioctl_svm_attribute - Attributes as pairs of type and value + * + * The meaning of the @value depends on the attribute type. + * + * @type: attribute type (see enum @kfd_ioctl_svm_attr_type) + * @value: attribute value + */ +struct kfd_ioctl_svm_attribute { + __u32 type; + __u32 value; +}; + +/** + * kfd_ioctl_svm_args - Arguments for SVM ioctl + * + * @op specifies the operation to perform (see enum + * @kfd_ioctl_svm_op). @start_addr and @size are common for all + * operations. + * + * A variable number of attributes can be given in @attrs. + * @nattr specifies the number of attributes. New attributes can be + * added in the future without breaking the ABI. If unknown attributes + * are given, the function returns -EINVAL. + * + * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address + * range. It may overlap existing virtual address ranges. If it does, + * the existing ranges will be split such that the attribute changes + * only apply to the specified address range. + * + * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes + * over all memory in the given range and returns the result as the + * attribute value. If different pages have different preferred or + * prefetch locations, 0xffffffff will be returned for + * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or + * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. 
For + * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be + * aggregated by bitwise AND. The minimum migration granularity + * throughout the range will be returned for + * @KFD_IOCTL_SVM_ATTR_GRANULARITY. + * + * Querying of accessibility attributes works by initializing the + * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the + * GPUID being queried. Multiple attributes can be given to allow + * querying multiple GPUIDs. The ioctl function overwrites the + * attribute type to indicate the access for the specified GPU. + * + * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS is invalid for + * @KFD_IOCTL_SVM_OP_GET_ATTR. + */ +struct kfd_ioctl_svm_args { + __u64 start_addr; + __u64 size; + __u32 op; + __u32 nattr; + /* Variable length array of attributes */ + struct kfd_ioctl_svm_attribute attrs[0]; +}; + +/** + * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode + * + * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process + * + * @xnack_enabled indicates whether recoverable page faults should be + * enabled for the current process. 0 means disabled, positive means + * enabled, negative means leave unchanged. If enabled, virtual address + * translations on GFXv9 and later AMD GPUs can return XNACK and retry + * the access until a valid PTE is available. This is used to implement + * device page faults. + * + * On output, @xnack_enabled returns the (new) current mode (0 or + * positive). Therefore, a negative input value can be used to query + * the current mode without changing it. + * + * The XNACK mode fundamentally changes the way SVM managed memory works + * in the driver, with subtle effects on application performance and + * functionality. + * + * Enabling XNACK mode requires shader programs to be compiled + * differently. Furthermore, not all GPUs support changing the mode + * per-process. Therefore changing the mode is only allowed while no + * user mode queues exist in the process. This ensures that no shader + * code is running that may be compiled for the wrong mode. GPUs + * that cannot change to the requested mode will prevent the XNACK + * mode switch from occurring. All GPUs used by the process must be in the + * same XNACK mode. + * + * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM. + * Therefore those GPUs are not considered for the XNACK mode switch. + * + * Return: 0 on success, -errno on failure + */ +struct kfd_ioctl_set_xnack_mode_args { + __s32 xnack_enabled; +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -573,7 +735,12 @@ enum kfd_mmio_remap { #define AMDKFD_IOC_SMI_EVENTS \ AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args) +#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args) + +#define AMDKFD_IOC_SET_XNACK_MODE \ + AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x20 +#define AMDKFD_COMMAND_END 0x22 #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 79d9c44d1a..d9e4aabcb3 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -280,6 +280,9 @@ struct kvm_xen_exit { /* Encounter unexpected vm-exit reason */ #define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4 +/* Flags that describe what fields in emulation_failure hold valid data. 
*/ +#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0) + /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ @@ -383,6 +386,25 @@ struct kvm_run { __u32 ndata; __u64 data[16]; } internal; + /* + * KVM_INTERNAL_ERROR_EMULATION + * + * "struct emulation_failure" is an overlay of "struct internal" + * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of + * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error + * sub-types, this struct is ABI! It also needs to be backwards + * compatible with "struct internal". Take special care that + * "ndata" is correct, that new fields are enumerated in "flags", + * and that each flag enumerates fields that are 64-bit aligned + * and sized (so that ndata+internal.data[] is valid/accurate). + */ + struct { + __u32 suberror; + __u32 ndata; + __u64 flags; + __u8 insn_size; + __u8 insn_bytes[15]; + } emulation_failure; /* KVM_EXIT_OSI */ struct { __u64 gprs[32]; @@ -1083,6 +1105,13 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SGX_ATTRIBUTE 196 #define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197 #define KVM_CAP_PTP_KVM 198 +#define KVM_CAP_HYPERV_ENFORCE_CPUID 199 +#define KVM_CAP_SREGS2 200 +#define KVM_CAP_EXIT_HYPERCALL 201 +#define KVM_CAP_PPC_RPT_INVALIDATE 202 +#define KVM_CAP_BINARY_STATS_FD 203 +#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 +#define KVM_CAP_ARM_MTE 205 #ifdef KVM_CAP_IRQ_ROUTING @@ -1428,6 +1457,7 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_PMU_EVENT_FILTER */ #define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) #define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3) +#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) @@ -1621,6 +1651,9 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) #define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) +#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2) +#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2) + struct kvm_xen_vcpu_attr { __u16 type; __u16 pad[3]; @@ -1899,4 +1932,76 @@ struct kvm_dirty_gfn { #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) +/** + * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. + * @flags: Some extra information for header, always 0 for now. + * @name_size: The size in bytes of the memory which contains statistics + * name string including trailing '\0'. The memory is allocated + * at the end of the statistics descriptor. + * @num_desc: The number of statistics the vm or vcpu has. + * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed + * by vm/vcpu stats fd. + * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file + * pointed by vm/vcpu stats fd. + * @data_offset: The offset of the vm/vcpu stats' data block in the file + * pointed by vm/vcpu stats fd. + * + * This is the header userspace needs to read from stats fd before any other + * readings. It is used by userspace to discover all the information about the + * vm/vcpu's binary statistics. + * Userspace reads this header from the start of the vm/vcpu's stats fd. 
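+ *
+ * A minimal sketch of reading it, assuming a stats fd obtained via the
+ * KVM_GET_STATS_FD ioctl defined below:
+ *
+ * .. code-block:: C
+ *
+ *	struct kvm_stats_header hdr;
+ *	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
+ *
+ *	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr))
+ *		return -errno;
+ *	// descriptors start at hdr.desc_offset, stats data at hdr.data_offset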
+ */ +struct kvm_stats_header { + __u32 flags; + __u32 name_size; + __u32 num_desc; + __u32 id_offset; + __u32 desc_offset; + __u32 data_offset; +}; + +#define KVM_STATS_TYPE_SHIFT 0 +#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_PEAK + +#define KVM_STATS_UNIT_SHIFT 4 +#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES + +#define KVM_STATS_BASE_SHIFT 8 +#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT) +#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT) +#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT) +#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2 + +/** + * struct kvm_stats_desc - Descriptor of a KVM statistic. + * @flags: Annotations of the stats, like type, unit, etc. + * @exponent: Used together with @flags to determine the unit. + * @size: The number of data items for this stat. + * Every data item is of type __u64. + * @offset: The offset of the stats from the start of the stat structure in + * struct kvm or kvm_vcpu. + * @unused: Unused field for future usage. Always 0 for now. + * @name: The name string for the stats. Its size is indicated by the + * &kvm_stats_header->name_size. + */ +struct kvm_stats_desc { + __u32 flags; + __s16 exponent; + __u16 size; + __u32 offset; + __u32 unused; + char name[]; +}; + +#define KVM_GET_STATS_FD _IO(KVMIO, 0xce) + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 8b86609849..960c7e93d1 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -29,6 +29,7 @@ #define KVM_HC_CLOCK_PAIRING 9 #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 +#define KVM_HC_MAP_GPA_RANGE 12 /* * hypercalls use architecture specific diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index c45a4eaea6..9919f2062b 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * lirc.h - linux infrared remote control header file - * last modified 2010/07/13 by Jarod Wilson */ #ifndef _LINUX_LIRC_H diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index f3956fc11d..35687dcb1a 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -97,5 +97,6 @@ #define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ #define Z3FOLD_MAGIC 0x33 #define PPC_CMM_MAGIC 0xc7571590 +#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */ #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h index 4832fd0b56..19a00bc7fe 100644 --- a/include/uapi/linux/mempolicy.h +++ b/include/uapi/linux/mempolicy.h @@ -60,7 +60,6 @@ enum { * are never OR'ed into the mode in mempolicy API arguments. 
*/ #define MPOL_F_SHARED (1 << 0) /* identify shared policies */ -#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */ #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */ #define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */ diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h index e6524ead2b..dd7a166fdf 100644 --- a/include/uapi/linux/mount.h +++ b/include/uapi/linux/mount.h @@ -120,6 +120,7 @@ enum fsconfig_command { #define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */ #define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */ #define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */ +#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */ /* * mount_setattr() diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 8eb3c0844b..7b05f71023 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -105,6 +105,7 @@ struct mptcp_info { __u64 mptcpi_rcv_nxt; __u8 mptcpi_local_addr_used; __u8 mptcpi_local_addr_max; + __u8 mptcpi_csum_enabled; }; /* diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h index c5d0ef7aa7..2d0b909642 100644 --- a/include/uapi/linux/nbd-netlink.h +++ b/include/uapi/linux/nbd-netlink.h @@ -35,6 +35,7 @@ enum { NBD_ATTR_SOCKETS, NBD_ATTR_DEAD_CONN_TIMEOUT, NBD_ATTR_DEVICE_LIST, + NBD_ATTR_BACKEND_IDENTIFIER, __NBD_ATTR_MAX, }; #define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1) diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 7ed0b3d1c0..fcc61c73a6 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -13,7 +13,7 @@ #include <linux/types.h> #include <linux/socket.h> /* for SO_TIMESTAMPING */ -/* SO_TIMESTAMPING gets an integer bit field comprised of these values */ +/* SO_TIMESTAMPING flags */ enum { SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), @@ -30,8 +30,9 @@ enum { SOF_TIMESTAMPING_OPT_STATS = (1<<12), SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), + SOF_TIMESTAMPING_BIND_PHC = (1 << 15), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -46,6 +47,18 @@ enum { SOF_TIMESTAMPING_TX_SCHED | \ SOF_TIMESTAMPING_TX_ACK) +/** + * struct so_timestamping - SO_TIMESTAMPING parameter + * + * @flags: SO_TIMESTAMPING flags + * @bind_phc: Index of PTP virtual clock bound to sock. This is available + * if the SOF_TIMESTAMPING_BIND_PHC flag is set. 
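+ *
+ * A minimal usage sketch, assuming a PTP virtual clock has been created
+ * and its index discovered (e.g. via ETHTOOL_MSG_PHC_VCLOCKS_GET):
+ *
+ * .. code-block:: C
+ *
+ *	struct so_timestamping st = {
+ *		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
+ *			 SOF_TIMESTAMPING_RAW_HARDWARE |
+ *			 SOF_TIMESTAMPING_BIND_PHC,
+ *		.bind_phc = 0,	// vclock index, an assumption here
+ *	};
+ *
+ *	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &st, sizeof(st)) < 0)
+ *		return -errno;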
+ */ +struct so_timestamping { + int flags; + int bind_phc; +}; + /** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 1fb4ca18ff..e94d1fa554 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -813,11 +813,13 @@ enum nft_exthdr_flags { * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers * @NFT_EXTHDR_OP_TCP: match against tcp options * @NFT_EXTHDR_OP_IPV4: match against ipv4 options + * @NFT_EXTHDR_OP_SCTP: match against sctp chunks */ enum nft_exthdr_op { NFT_EXTHDR_OP_IPV6, NFT_EXTHDR_OP_TCPOPT, NFT_EXTHDR_OP_IPV4, + NFT_EXTHDR_OP_SCTP, __NFT_EXTHDR_OP_MAX }; #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) @@ -1193,6 +1195,21 @@ enum nft_counter_attributes { }; #define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1) +/** + * enum nft_last_attributes - nf_tables last expression netlink attributes + * + * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32) + * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64) + */ +enum nft_last_attributes { + NFTA_LAST_UNSPEC, + NFTA_LAST_SET, + NFTA_LAST_MSECS, + NFTA_LAST_PAD, + __NFTA_LAST_MAX +}; +#define NFTA_LAST_MAX (__NFTA_LAST_MAX - 1) + /** * enum nft_log_attributes - nf_tables log expression netlink attributes * diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h index 5bc960f220..6cd58cd2a6 100644 --- a/include/uapi/linux/netfilter/nfnetlink.h +++ b/include/uapi/linux/netfilter/nfnetlink.h @@ -60,7 +60,8 @@ struct nfgenmsg { #define NFNL_SUBSYS_CTHELPER 9 #define NFNL_SUBSYS_NFTABLES 10 #define NFNL_SUBSYS_NFT_COMPAT 11 -#define NFNL_SUBSYS_COUNT 12 +#define NFNL_SUBSYS_HOOK 12 +#define NFNL_SUBSYS_COUNT 13 /* Reserved control nfnetlink messages */ #define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h new file mode 100644 index 0000000000..912ec60b26 --- /dev/null +++ b/include/uapi/linux/netfilter/nfnetlink_hook.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _NFNL_HOOK_H_ +#define _NFNL_HOOK_H_ + +enum nfnl_hook_msg_types { + NFNL_MSG_HOOK_GET, + NFNL_MSG_HOOK_MAX, +}; + +/** + * enum nfnl_hook_attributes - netfilter hook netlink attributes + * + * @NFNLA_HOOK_HOOKNUM: netfilter hook number (NLA_U32) + * @NFNLA_HOOK_PRIORITY: netfilter hook priority (NLA_U32) + * @NFNLA_HOOK_DEV: netdevice name (NLA_STRING) + * @NFNLA_HOOK_FUNCTION_NAME: hook function name (NLA_STRING) + * @NFNLA_HOOK_MODULE_NAME: kernel module that registered this hook (NLA_STRING) + * @NFNLA_HOOK_CHAIN_INFO: basechain hook metadata (NLA_NESTED) + */ +enum nfnl_hook_attributes { + NFNLA_HOOK_UNSPEC, + NFNLA_HOOK_HOOKNUM, + NFNLA_HOOK_PRIORITY, + NFNLA_HOOK_DEV, + NFNLA_HOOK_FUNCTION_NAME, + NFNLA_HOOK_MODULE_NAME, + NFNLA_HOOK_CHAIN_INFO, + __NFNLA_HOOK_MAX }; +#define NFNLA_HOOK_MAX (__NFNLA_HOOK_MAX - 1) + +/** + * enum nfnl_hook_chain_info_attributes - chain description + * + * @NFNLA_HOOK_INFO_DESC: nft chain and table name (enum nft_table_attributes) (NLA_NESTED) + * @NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32) + */ +enum nfnl_hook_chain_info_attributes { + NFNLA_HOOK_INFO_UNSPEC, + NFNLA_HOOK_INFO_DESC, + NFNLA_HOOK_INFO_TYPE, + __NFNLA_HOOK_INFO_MAX, +}; +#define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1) + +/** + * enum 
nfnl_hook_chaintype - chain type + * + * @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain + */ +enum nfnl_hook_chaintype { + NFNL_HOOK_TYPE_NFTABLES = 0x1, +}; + +#endif /* _NFNL_HOOK_H_ */ diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h index 45c8d3b027..0af9c113d6 100644 --- a/include/uapi/linux/netfilter/nfnetlink_log.h +++ b/include/uapi/linux/netfilter/nfnetlink_log.h @@ -61,7 +61,7 @@ enum nfulnl_attr_type { NFULA_HWTYPE, /* hardware type */ NFULA_HWHEADER, /* hardware header */ NFULA_HWLEN, /* hardware header length */ - NFULA_CT, /* nf_conntrack_netlink.h */ + NFULA_CT, /* nfnetlink_conntrack.h */ NFULA_CT_INFO, /* enum ip_conntrack_info */ NFULA_VLAN, /* nested attribute: packet vlan info */ NFULA_L2HDR, /* full L2 header */ diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index bcb2cb5d40..aed90c4df0 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -51,11 +51,11 @@ enum nfqnl_attr_type { NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ NFQA_HWADDR, /* nfqnl_msg_packet_hw */ NFQA_PAYLOAD, /* opaque data payload */ - NFQA_CT, /* nf_conntrack_netlink.h */ + NFQA_CT, /* nfnetlink_conntrack.h */ NFQA_CT_INFO, /* enum ip_conntrack_info */ NFQA_CAP_LEN, /* __u32 length of captured packet */ NFQA_SKB_INFO, /* __u32 skb meta information */ - NFQA_EXP, /* nf_conntrack_netlink.h */ + NFQA_EXP, /* nfnetlink_conntrack.h */ NFQA_UID, /* __u32 sk uid */ NFQA_GID, /* __u32 sk gid */ NFQA_SECCTX, /* security context string */ diff --git a/include/uapi/linux/netfilter/xt_DSCP.h b/include/uapi/linux/netfilter/xt_DSCP.h index 223d635e8b..7594e4df85 100644 --- a/include/uapi/linux/netfilter/xt_DSCP.h +++ b/include/uapi/linux/netfilter/xt_DSCP.h @@ -1,27 +1,32 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* x_tables module for setting the IPv4/IPv6 DSCP field +/* x_tables module for matching the IPv4/IPv6 DSCP field * * (C) 2002 Harald Welte - * based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh * This software is distributed under GNU GPL v2, 1991 * * See RFC2474 for a description of the DSCP field within the IP Header. 
* - * xt_DSCP.h,v 1.7 2002/03/14 12:03:13 laforge Exp + * xt_dscp.h,v 1.3 2002/08/05 19:00:21 laforge Exp */ -#ifndef _XT_DSCP_TARGET_H -#define _XT_DSCP_TARGET_H -#include <linux/netfilter/xt_dscp.h> +#ifndef _XT_DSCP_H +#define _XT_DSCP_H + #include <linux/types.h> -/* target info */ -struct xt_DSCP_info { +#define XT_DSCP_MASK 0xfc /* 11111100 */ +#define XT_DSCP_SHIFT 2 +#define XT_DSCP_MAX 0x3f /* 00111111 */ + +/* match info */ +struct xt_dscp_info { __u8 dscp; + __u8 invert; }; -struct xt_tos_target_info { - __u8 tos_value; +struct xt_tos_match_info { __u8 tos_mask; + __u8 tos_value; + __u8 invert; }; -#endif /* _XT_DSCP_TARGET_H */ +#endif /* _XT_DSCP_H */ diff --git a/include/uapi/linux/netfilter/xt_tcpmss.h b/include/uapi/linux/netfilter/xt_tcpmss.h index 2268f58b4d..65ea6c9dab 100644 --- a/include/uapi/linux/netfilter/xt_tcpmss.h +++ b/include/uapi/linux/netfilter/xt_tcpmss.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _XT_TCPMSS_MATCH_H -#define _XT_TCPMSS_MATCH_H +#ifndef _XT_TCPMSS_H +#define _XT_TCPMSS_H #include <linux/types.h> -struct xt_tcpmss_match_info { - __u16 mss_min, mss_max; - __u8 invert; +struct xt_tcpmss_info { + __u16 mss; }; -#endif /*_XT_TCPMSS_MATCH_H*/ +#define XT_TCPMSS_CLAMP_PMTU 0xffff + +#endif /* _XT_TCPMSS_H */ diff --git a/include/uapi/linux/netfilter_ipv4/ipt_ECN.h b/include/uapi/linux/netfilter_ipv4/ipt_ECN.h index e3630fd045..8121bec470 100644 --- a/include/uapi/linux/netfilter_ipv4/ipt_ECN.h +++ b/include/uapi/linux/netfilter_ipv4/ipt_ECN.h @@ -1,34 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* Header file for iptables ipt_ECN target - * - * (C) 2002 by Harald Welte - * - * This software is distributed under GNU GPL v2, 1991 - * - * ipt_ECN.h,v 1.3 2002/05/29 12:17:40 laforge Exp -*/ -#ifndef _IPT_ECN_TARGET_H -#define _IPT_ECN_TARGET_H +#ifndef _IPT_ECN_H +#define _IPT_ECN_H -#include <linux/types.h> -#include <linux/netfilter/xt_DSCP.h> +#include <linux/netfilter/xt_ecn.h> +#define ipt_ecn_info xt_ecn_info -#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) - -#define IPT_ECN_OP_SET_IP 0x01 /* set ECN bits of IPv4 header */ -#define IPT_ECN_OP_SET_ECE 0x10 /* set ECE bit of TCP header */ -#define IPT_ECN_OP_SET_CWR 0x20 /* set CWR bit of TCP header */ - -#define IPT_ECN_OP_MASK 0xce - -struct ipt_ECN_info { - __u8 operation; /* bitset of operations */ - __u8 ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */ - union { - struct { - __u8 ece:1, cwr:1; /* TCP ECT bits */ - } tcp; - } proto; +enum { + IPT_ECN_IP_MASK = XT_ECN_IP_MASK, + IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP, + IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE, + IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR, + IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK, }; -#endif /* _IPT_ECN_TARGET_H */ +#endif /* _IPT_ECN_H */ diff --git a/include/uapi/linux/netfilter_ipv4/ipt_ttl.h b/include/uapi/linux/netfilter_ipv4/ipt_ttl.h index ad0226a862..57d2fc67a9 100644 --- a/include/uapi/linux/netfilter_ipv4/ipt_ttl.h +++ b/include/uapi/linux/netfilter_ipv4/ipt_ttl.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* IP tables module for matching the value of the TTL - * (C) 2000 by Harald Welte */ +/* TTL modification module for IP tables + * (C) 2000 by Harald Welte */ #ifndef _IPT_TTL_H #define _IPT_TTL_H @@ -8,14 +8,14 @@ #include <linux/types.h> enum { - IPT_TTL_EQ = 0, /* equals */ - IPT_TTL_NE, /* not equals */ - IPT_TTL_LT, /* less than */ - IPT_TTL_GT, /* greater than */ + IPT_TTL_SET = 0, + IPT_TTL_INC, + IPT_TTL_DEC }; +#define IPT_TTL_MAXMODE IPT_TTL_DEC -struct ipt_ttl_info { +struct ipt_TTL_info { __u8 mode; __u8 ttl; }; diff --git 
a/include/uapi/linux/netfilter_ipv6/ip6t_HL.h b/include/uapi/linux/netfilter_ipv6/ip6t_HL.h index eaed56a287..6b62f9418e 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6t_HL.h +++ b/include/uapi/linux/netfilter_ipv6/ip6t_HL.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* Hop Limit modification module for ip6tables +/* ip6tables module for matching the Hop Limit value * Maciej Soltysiak - * Based on HW's TTL module */ + * Based on HW's ttl module */ #ifndef _IP6T_HL_H #define _IP6T_HL_H @@ -9,14 +9,14 @@ #include <linux/types.h> enum { - IP6T_HL_SET = 0, - IP6T_HL_INC, - IP6T_HL_DEC + IP6T_HL_EQ = 0, /* equals */ + IP6T_HL_NE, /* not equals */ + IP6T_HL_LT, /* less than */ + IP6T_HL_GT, /* greater than */ }; -#define IP6T_HL_MAXMODE IP6T_HL_DEC -struct ip6t_HL_info { +struct ip6t_hl_info { __u8 mode; __u8 hop_limit; }; diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 3d94269bbf..4c0cde075c 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -91,9 +91,10 @@ struct nlmsghdr { #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) #define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN) #define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) -#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) +#define NLMSG_DATA(nlh) ((void *)(((char *)nlh) + NLMSG_HDRLEN)) #define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ - (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) + (struct nlmsghdr *)(((char *)(nlh)) + \ + NLMSG_ALIGN((nlh)->nlmsg_len))) #define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len <= (len)) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f962c06e98..db474994fa 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -3654,6 +3654,8 @@ enum nl80211_mpath_info { * defined * @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16), * given for all 6 GHz band channels + * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are + * advertised on this band/for this iftype (binary) * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use */ enum nl80211_band_iftype_attr { @@ -3665,6 +3667,7 @@ enum nl80211_band_iftype_attr { NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS, /* keep last */ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, @@ -6912,6 +6915,9 @@ enum nl80211_peer_measurement_ftm_capa { * @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only * valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set. + * @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the + * responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED + * or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
* * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number @@ -6931,6 +6937,7 @@ enum nl80211_peer_measurement_ftm_req { NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED, NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED, NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK, + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR, /* keep last */ NUM_NL80211_PMSR_FTM_REQ_ATTR, diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h index c3ab4c8262..f9c1af8d14 100644 --- a/include/uapi/linux/pcitest.h +++ b/include/uapi/linux/pcitest.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/** +/* * pcitest.h - PCI test uapi defines * * Copyright (C) 2017 Texas Instruments diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 18a9f59dc0..967d9c5532 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -259,4 +259,12 @@ struct prctl_mm_map { #define PR_PAC_SET_ENABLED_KEYS 60 #define PR_PAC_GET_ENABLED_KEYS 61 +/* Request the scheduler to share a core */ +#define PR_SCHED_CORE 62 +# define PR_SCHED_CORE_GET 0 +# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */ +# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */ +# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */ +# define PR_SCHED_CORE_MAX 4 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index cb78e7a739..c4ff1ebd8b 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -141,6 +141,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131 #define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE #define SCTP_REMOTE_UDP_ENCAPS_PORT 132 +#define SCTP_PLPMTUD_PROBE_INTERVAL 133 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -1213,4 +1214,11 @@ enum sctp_sched_type { SCTP_SS_MAX = SCTP_SS_RR }; +/* Probe Interval socket option */ +struct sctp_probeinterval { + sctp_assoc_t spi_assoc_id; + struct sockaddr_storage spi_address; + __u32 spi_interval; +}; + #endif /* _UAPI_SCTP_H */ diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 6ba18b82a0..78074254ab 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -115,6 +115,7 @@ struct seccomp_notif_resp { /* valid flags for seccomp_notif_addfd */ #define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */ +#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */ /** * struct seccomp_notif_addfd diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h index 5ae3ace84d..332b18f318 100644 --- a/include/uapi/linux/seg6_local.h +++ b/include/uapi/linux/seg6_local.h @@ -64,6 +64,8 @@ enum { SEG6_LOCAL_ACTION_END_AM = 14, /* custom BPF action */ SEG6_LOCAL_ACTION_END_BPF = 15, + /* decap and lookup of DA in v4 or v6 table */ + SEG6_LOCAL_ACTION_END_DT46 = 16, __SEG6_LOCAL_ACTION_MAX, }; diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 3e68da07fb..0f7f87c70b 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -47,6 +47,8 @@ enum { SMC_NETLINK_GET_LGR_SMCD, SMC_NETLINK_GET_DEV_SMCD, SMC_NETLINK_GET_DEV_SMCR, + SMC_NETLINK_GET_STATS, + SMC_NETLINK_GET_FBACK_STATS, }; /* SMC_GENL_FAMILY top level attributes */ @@ -58,6 +60,8 @@ enum { SMC_GEN_LGR_SMCD, /* nest */ SMC_GEN_DEV_SMCD, /* nest */ SMC_GEN_DEV_SMCR, /* nest */ + SMC_GEN_STATS, /* nest */ + SMC_GEN_FBACK_STATS, /* nest */ __SMC_GEN_MAX, SMC_GEN_MAX = 
__SMC_GEN_MAX - 1 }; @@ -159,4 +163,83 @@ enum { SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1 }; +/* SMC_NLA_STATS_T_TX(RX)_RMB_SIZE nested attributes */ +/* SMC_NLA_STATS_TX(RX)PLOAD_SIZE nested attributes */ +enum { + SMC_NLA_STATS_PLOAD_PAD, + SMC_NLA_STATS_PLOAD_8K, /* u64 */ + SMC_NLA_STATS_PLOAD_16K, /* u64 */ + SMC_NLA_STATS_PLOAD_32K, /* u64 */ + SMC_NLA_STATS_PLOAD_64K, /* u64 */ + SMC_NLA_STATS_PLOAD_128K, /* u64 */ + SMC_NLA_STATS_PLOAD_256K, /* u64 */ + SMC_NLA_STATS_PLOAD_512K, /* u64 */ + SMC_NLA_STATS_PLOAD_1024K, /* u64 */ + SMC_NLA_STATS_PLOAD_G_1024K, /* u64 */ + __SMC_NLA_STATS_PLOAD_MAX, + SMC_NLA_STATS_PLOAD_MAX = __SMC_NLA_STATS_PLOAD_MAX - 1 +}; + +/* SMC_NLA_STATS_T_TX(RX)_RMB_STATS nested attributes */ +enum { + SMC_NLA_STATS_RMB_PAD, + SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT, /* u64 */ + SMC_NLA_STATS_RMB_SIZE_SM_CNT, /* u64 */ + SMC_NLA_STATS_RMB_FULL_PEER_CNT, /* u64 */ + SMC_NLA_STATS_RMB_FULL_CNT, /* u64 */ + SMC_NLA_STATS_RMB_REUSE_CNT, /* u64 */ + SMC_NLA_STATS_RMB_ALLOC_CNT, /* u64 */ + SMC_NLA_STATS_RMB_DGRADE_CNT, /* u64 */ + __SMC_NLA_STATS_RMB_MAX, + SMC_NLA_STATS_RMB_MAX = __SMC_NLA_STATS_RMB_MAX - 1 +}; + +/* SMC_NLA_STATS_SMCD_TECH and _SMCR_TECH nested attributes */ +enum { + SMC_NLA_STATS_T_PAD, + SMC_NLA_STATS_T_TX_RMB_SIZE, /* nest */ + SMC_NLA_STATS_T_RX_RMB_SIZE, /* nest */ + SMC_NLA_STATS_T_TXPLOAD_SIZE, /* nest */ + SMC_NLA_STATS_T_RXPLOAD_SIZE, /* nest */ + SMC_NLA_STATS_T_TX_RMB_STATS, /* nest */ + SMC_NLA_STATS_T_RX_RMB_STATS, /* nest */ + SMC_NLA_STATS_T_CLNT_V1_SUCC, /* u64 */ + SMC_NLA_STATS_T_CLNT_V2_SUCC, /* u64 */ + SMC_NLA_STATS_T_SRV_V1_SUCC, /* u64 */ + SMC_NLA_STATS_T_SRV_V2_SUCC, /* u64 */ + SMC_NLA_STATS_T_SENDPAGE_CNT, /* u64 */ + SMC_NLA_STATS_T_SPLICE_CNT, /* u64 */ + SMC_NLA_STATS_T_CORK_CNT, /* u64 */ + SMC_NLA_STATS_T_NDLY_CNT, /* u64 */ + SMC_NLA_STATS_T_URG_DATA_CNT, /* u64 */ + SMC_NLA_STATS_T_RX_BYTES, /* u64 */ + SMC_NLA_STATS_T_TX_BYTES, /* u64 */ + SMC_NLA_STATS_T_RX_CNT, /* u64 */ + SMC_NLA_STATS_T_TX_CNT, /* u64 */ + __SMC_NLA_STATS_T_MAX, + SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1 +}; + +/* SMC_GEN_STATS attributes */ +enum { + SMC_NLA_STATS_PAD, + SMC_NLA_STATS_SMCD_TECH, /* nest */ + SMC_NLA_STATS_SMCR_TECH, /* nest */ + SMC_NLA_STATS_CLNT_HS_ERR_CNT, /* u64 */ + SMC_NLA_STATS_SRV_HS_ERR_CNT, /* u64 */ + __SMC_NLA_STATS_MAX, + SMC_NLA_STATS_MAX = __SMC_NLA_STATS_MAX - 1 +}; + +/* SMC_GEN_FBACK_STATS attributes */ +enum { + SMC_NLA_FBACK_STATS_PAD, + SMC_NLA_FBACK_STATS_TYPE, /* u8 */ + SMC_NLA_FBACK_STATS_SRV_CNT, /* u64 */ + SMC_NLA_FBACK_STATS_CLNT_CNT, /* u64 */ + SMC_NLA_FBACK_STATS_RSN_CODE, /* u32 */ + SMC_NLA_FBACK_STATS_RSN_CNT, /* u16 */ + __SMC_NLA_FBACK_STATS_MAX, + SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1 +}; #endif /* _UAPI_LINUX_SMC_H */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 26fc60ce92..904909d020 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -290,6 +290,8 @@ enum LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */ LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */ LINUX_MIB_TCPDSACKIGNOREDDUBIOUS, /* TCPDSACKIgnoredDubious */ + LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */ + LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h index fbcce04abf..08f46b60b1 100644 --- a/include/uapi/linux/surface_aggregator/cdev.h +++ b/include/uapi/linux/surface_aggregator/cdev.h @@ -6,7 
+6,7 @@ * device. This device provides direct user-space access to the SSAM EC. * Intended for debugging and development. * - * Copyright (C) 2020 Maximilian Luz + * Copyright (C) 2020-2021 Maximilian Luz */ #ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H @@ -73,6 +73,75 @@ struct ssam_cdev_request { } response; } __attribute__((__packed__)); -#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request) +/** + * struct ssam_cdev_notifier_desc - Notifier descriptor. + * @priority: Priority value determining the order in which notifier + * callbacks will be called. A higher value means higher + * priority, i.e. the associated callback will be executed + * earlier than other (lower priority) callbacks. + * @target_category: The event target category for which this notifier should + * receive events. + * + * Specifies the notifier that should be registered or unregistered, + * specifically with which priority and for which target category of events. + */ +struct ssam_cdev_notifier_desc { + __s32 priority; + __u8 target_category; +} __attribute__((__packed__)); + +/** + * struct ssam_cdev_event_desc - Event descriptor. + * @reg: Registry via which the event will be enabled/disabled. + * @reg.target_category: Target category for the event registry requests. + * @reg.target_id: Target ID for the event registry requests. + * @reg.cid_enable: Command ID for the event-enable request. + * @reg.cid_disable: Command ID for the event-disable request. + * @id: ID specifying the event. + * @id.target_category: Target category of the event source. + * @id.instance: Instance ID of the event source. + * @flags: Flags used for enabling the event. + * + * Specifies which event should be enabled/disabled and how to do that. + */ +struct ssam_cdev_event_desc { + struct { + __u8 target_category; + __u8 target_id; + __u8 cid_enable; + __u8 cid_disable; + } reg; + + struct { + __u8 target_category; + __u8 instance; + } id; + + __u8 flags; +} __attribute__((__packed__)); + +/** + * struct ssam_cdev_event - SSAM event sent by the EC. + * @target_category: Target category of the event source. See &enum ssam_ssh_tc. + * @target_id: Target ID of the event source. + * @command_id: Command ID of the event. + * @instance_id: Instance ID of the event source. + * @length: Length of the event payload in bytes. + * @data: Event payload data. 
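+ *
+ * A minimal sketch, not authoritative: assuming a notifier has been
+ * registered and the event source enabled via the ioctls below, and
+ * that "fd", "buf" and handle_event() are hypothetical, an event could
+ * then be consumed by reading from the controller device file:
+ *
+ *	struct ssam_cdev_event *ev = (struct ssam_cdev_event *)buf;
+ *
+ *	if (read(fd, buf, sizeof(buf)) > 0)
+ *		handle_event(ev->target_category, ev->data, ev->length);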
+ */ +struct ssam_cdev_event { + __u8 target_category; + __u8 target_id; + __u8 command_id; + __u8 instance_id; + __u16 length; + __u8 data[]; +} __attribute__((__packed__)); + +#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request) +#define SSAM_CDEV_NOTIF_REGISTER _IOW(0xA5, 2, struct ssam_cdev_notifier_desc) +#define SSAM_CDEV_NOTIF_UNREGISTER _IOW(0xA5, 3, struct ssam_cdev_notifier_desc) +#define SSAM_CDEV_EVENT_ENABLE _IOW(0xA5, 4, struct ssam_cdev_event_desc) +#define SSAM_CDEV_EVENT_DISABLE _IOW(0xA5, 5, struct ssam_cdev_event_desc) #endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 650480f41f..05b31d60ac 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -31,7 +31,8 @@ UFFD_FEATURE_MISSING_SHMEM | \ UFFD_FEATURE_SIGBUS | \ UFFD_FEATURE_THREAD_ID | \ - UFFD_FEATURE_MINOR_HUGETLBFS) + UFFD_FEATURE_MINOR_HUGETLBFS | \ + UFFD_FEATURE_MINOR_SHMEM) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -185,6 +186,9 @@ struct uffdio_api { * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults * can be intercepted (via REGISTER_MODE_MINOR) for * hugetlbfs-backed pages. + * + * UFFD_FEATURE_MINOR_SHMEM indicates the same support as + * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -196,6 +200,7 @@ struct uffdio_api { #define UFFD_FEATURE_SIGBUS (1<<7) #define UFFD_FEATURE_THREAD_ID (1<<8) #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) +#define UFFD_FEATURE_MINOR_SHMEM (1<<10) __u64 features; __u64 ioctls; diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 6922ca1da7..c9e6e695b8 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -50,6 +50,7 @@ #ifndef __LINUX_V4L2_CONTROLS_H #define __LINUX_V4L2_CONTROLS_H +#include <linux/const.h> #include <linux/types.h> /* Control classes */ @@ -1607,30 +1608,30 @@ struct v4l2_ctrl_h264_decode_params { #define V4L2_FWHT_VERSION 3 /* Set if this is an interlaced format */ -#define V4L2_FWHT_FL_IS_INTERLACED BIT(0) +#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0) /* Set if this is a bottom-first (NTSC) interlaced format */ -#define V4L2_FWHT_FL_IS_BOTTOM_FIRST BIT(1) +#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1) /* Set if each 'frame' contains just one field */ -#define V4L2_FWHT_FL_IS_ALTERNATE BIT(2) +#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2) /* * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this * 'frame' is the bottom field, else it is the top field.
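 *
 * As an illustrative sketch only ("params" is a hypothetical instance
 * of struct v4l2_ctrl_fwht_params, whose flags field carries these
 * bits), a decoder would test this as:
 *
 *	int bottom = params.flags & V4L2_FWHT_FL_IS_BOTTOM_FIELD;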
 */ -#define V4L2_FWHT_FL_IS_BOTTOM_FIELD BIT(3) +#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3) /* Set if the Y' plane is uncompressed */ -#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED BIT(4) +#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4) /* Set if the Cb plane is uncompressed */ -#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED BIT(5) +#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5) /* Set if the Cr plane is uncompressed */ -#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED BIT(6) +#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6) /* Set if the chroma plane is full height, if cleared it is half height */ -#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT BIT(7) +#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7) /* Set if the chroma plane is full width, if cleared it is half width */ -#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH BIT(8) +#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8) /* Set if the alpha plane is uncompressed */ -#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9) +#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9) /* Set if this is an I Frame */ -#define V4L2_FWHT_FL_I_FRAME BIT(10) +#define V4L2_FWHT_FL_I_FRAME _BITUL(10) /* A 4-value flag - the number of components - 1 */ #define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16) @@ -1867,6 +1868,118 @@ struct v4l2_ctrl_vp8_frame { __u64 flags; }; +/* Stateless MPEG-2 controls */ + +#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01 + +#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220) +/** + * struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header + * + * All the members of this structure match the sequence header and sequence + * extension syntaxes as specified by the MPEG-2 specification. + * + * Fields horizontal_size, vertical_size and vbv_buffer_size are a + * combination of respective _value and extension syntax elements, + * as described in section 6.3.3 "Sequence header". + * + * @horizontal_size: combination of elements horizontal_size_value and + * horizontal_size_extension. + * @vertical_size: combination of elements vertical_size_value and + * vertical_size_extension. + * @vbv_buffer_size: combination of elements vbv_buffer_size_value and + * vbv_buffer_size_extension. + * @profile_and_level_indication: see MPEG-2 specification. + * @chroma_format: see MPEG-2 specification. + * @flags: see V4L2_MPEG2_SEQ_FLAG_{}. + */ +struct v4l2_ctrl_mpeg2_sequence { + __u16 horizontal_size; + __u16 vertical_size; + __u32 vbv_buffer_size; + __u16 profile_and_level_indication; + __u8 chroma_format; + __u8 flags; +}; + +#define V4L2_MPEG2_PIC_CODING_TYPE_I 1 +#define V4L2_MPEG2_PIC_CODING_TYPE_P 2 +#define V4L2_MPEG2_PIC_CODING_TYPE_B 3 +#define V4L2_MPEG2_PIC_CODING_TYPE_D 4 + +#define V4L2_MPEG2_PIC_TOP_FIELD 0x1 +#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2 +#define V4L2_MPEG2_PIC_FRAME 0x3 + +#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001 +#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002 +#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004 +#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008 +#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010 +#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020 +#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040 +#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080 + +#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221) +/** + * struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header + * + * All the members of this structure match the picture header and picture + * coding extension syntaxes as specified by the MPEG-2 specification.
+ * + * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as + * reference for backward prediction. + * @forward_ref_ts: timestamp of the V4L2 capture buffer to use as + * reference for forward prediction. These timestamps refer to the + * timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns() + * to convert the struct timeval to a __u64. + * @flags: see V4L2_MPEG2_PIC_FLAG_{}. + * @f_code: see MPEG-2 specification. + * @picture_coding_type: see MPEG-2 specification. + * @picture_structure: see V4L2_MPEG2_PIC_{}_FIELD. + * @intra_dc_precision: see MPEG-2 specification. + * @reserved: padding field. Should be zeroed by applications. + */ +struct v4l2_ctrl_mpeg2_picture { + __u64 backward_ref_ts; + __u64 forward_ref_ts; + __u32 flags; + __u8 f_code[2][2]; + __u8 picture_coding_type; + __u8 picture_structure; + __u8 intra_dc_precision; + __u8 reserved[5]; +}; + +#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222) +/** + * struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation + * + * Quantisation matrices as specified by section 6.3.7 + * "Quant matrix extension". + * + * @intra_quantiser_matrix: The quantisation matrix coefficients + * for intra-coded frames, in zigzag scanning order. It is relevant + * for both luma and chroma components, although it can be superseded + * by the chroma-specific matrix for non-4:2:0 YUV formats. + * @non_intra_quantiser_matrix: The quantisation matrix coefficients + * for non-intra-coded frames, in zigzag scanning order. It is relevant + * for both luma and chroma components, although it can be superseded + * by the chroma-specific matrix for non-4:2:0 YUV formats. + * @chroma_intra_quantiser_matrix: The quantisation matrix coefficients + * for the chrominance component of intra-coded frames, in zigzag scanning + * order. Only relevant for 4:2:2 and 4:4:4 YUV formats. + * @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients + * for the chrominance component of non-intra-coded frames, in zigzag scanning + * order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
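+ *
+ * A short, non-authoritative sketch of how these matrices reach the
+ * driver ("ctrl" is a hypothetical, zero-initialized struct
+ * v4l2_ext_control submitted through VIDIOC_S_EXT_CTRLS):
+ *
+ *	struct v4l2_ctrl_mpeg2_quantisation quant = { 0 };
+ *
+ *	ctrl.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION;
+ *	ctrl.size = sizeof(quant);
+ *	ctrl.p_mpeg2_quantisation = &quant;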
+ */ +struct v4l2_ctrl_mpeg2_quantisation { + __u8 intra_quantiser_matrix[64]; + __u8 non_intra_quantiser_matrix[64]; + __u8 chroma_intra_quantiser_matrix[64]; + __u8 chroma_non_intra_quantiser_matrix[64]; +}; + #define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900) #define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1) diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 88d73aab73..12f356c486 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -1760,6 +1760,9 @@ struct v4l2_ext_control { struct v4l2_ctrl_h264_decode_params __user *p_h264_decode_params; struct v4l2_ctrl_fwht_params __user *p_fwht_params; struct v4l2_ctrl_vp8_frame __user *p_vp8_frame; + struct v4l2_ctrl_mpeg2_sequence __user *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture __user *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation __user *p_mpeg2_quantisation; void __user *ptr; }; } __attribute__ ((packed)); @@ -1820,6 +1823,10 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220, V4L2_CTRL_TYPE_VP8_FRAME = 0x0240, + + V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250, + V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251, + V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252, }; /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 4fe842c3a3..70a8057ad4 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -57,4 +57,16 @@ #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ #define VIRTIO_ID_BT 40 /* virtio bluetooth */ +/* + * Virtio Transitional IDs + */ + +#define VIRTIO_TRANS_ID_NET 1000 /* transitional virtio net */ +#define VIRTIO_TRANS_ID_BLOCK 1001 /* transitional virtio block */ +#define VIRTIO_TRANS_ID_BALLOON 1002 /* transitional virtio balloon */ +#define VIRTIO_TRANS_ID_CONSOLE 1003 /* transitional virtio console */ +#define VIRTIO_TRANS_ID_SCSI 1004 /* transitional virtio SCSI */ +#define VIRTIO_TRANS_ID_RNG 1005 /* transitional virtio rng */ +#define VIRTIO_TRANS_ID_9P 1009 /* transitional virtio 9p console */ + #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/linux/virtio_pcidev.h b/include/uapi/linux/virtio_pcidev.h new file mode 100644 index 0000000000..89daa88bcf --- /dev/null +++ b/include/uapi/linux/virtio_pcidev.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: Johannes Berg + */ +#ifndef _UAPI_LINUX_VIRTIO_PCIDEV_H +#define _UAPI_LINUX_VIRTIO_PCIDEV_H +#include <linux/types.h> + +/** + * enum virtio_pcidev_ops - virtual PCI device operations + * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8; + * the @data field should be filled in by the device (in little endian). + * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8; + * the @data field contains the data to write (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable; + * the @data field should be filled in by the device (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable; + * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but + * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE) + * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for + * the number + * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports + * the 16- or 32-bit write that would otherwise be done into memory, + * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above + * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be + * all zeroes) to signal the PME# pin. + */ +enum virtio_pcidev_ops { + VIRTIO_PCIDEV_OP_RESERVED = 0, + VIRTIO_PCIDEV_OP_CFG_READ, + VIRTIO_PCIDEV_OP_CFG_WRITE, + VIRTIO_PCIDEV_OP_MMIO_READ, + VIRTIO_PCIDEV_OP_MMIO_WRITE, + VIRTIO_PCIDEV_OP_MMIO_MEMSET, + VIRTIO_PCIDEV_OP_INT, + VIRTIO_PCIDEV_OP_MSI, + VIRTIO_PCIDEV_OP_PME, +}; + +/** + * struct virtio_pcidev_msg - virtio PCI device operation + * @op: the operation to do + * @bar: the bar (only with BAR read/write messages) + * @reserved: reserved + * @size: the size of the read/write (in bytes) + * @addr: the address to read/write + * @data: the data, normally @size long, but just one byte for + * %VIRTIO_PCIDEV_OP_MMIO_MEMSET + * + * Note: the fields are all in native (CPU) endian, however, the + * @data values will often be in little endian (see the ops above.) + */ +struct virtio_pcidev_msg { + __u8 op; + __u8 bar; + __u16 reserved; + __u32 size; + __u64 addr; + __u8 data[]; +}; + +#endif /* _UAPI_LINUX_VIRTIO_PCIDEV_H */ diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h index 1d57ed3d84..3dd3555b27 100644 --- a/include/uapi/linux/virtio_vsock.h +++ b/include/uapi/linux/virtio_vsock.h @@ -38,6 +38,9 @@ #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +/* The feature bitmap for virtio vsock */ +#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */ + struct virtio_vsock_config { __le64 guest_cid; } __attribute__((packed)); @@ -65,6 +68,7 @@ struct virtio_vsock_hdr { enum virtio_vsock_type { VIRTIO_VSOCK_TYPE_STREAM = 1, + VIRTIO_VSOCK_TYPE_SEQPACKET = 2, }; enum virtio_vsock_op { @@ -91,4 +95,9 @@ enum virtio_vsock_shutdown { VIRTIO_VSOCK_SHUTDOWN_SEND = 2, }; +/* VIRTIO_VSOCK_OP_RW flags values */ +enum virtio_vsock_rw { + VIRTIO_VSOCK_SEQ_EOR = 1, +}; + #endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */ diff --git a/include/uapi/linux/wwan.h b/include/uapi/linux/wwan.h new file mode 100644 index 0000000000..32a2720b4d --- /dev/null +++ b/include/uapi/linux/wwan.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2021 Intel Corporation.
+ */ +#ifndef _UAPI_WWAN_H_ +#define _UAPI_WWAN_H_ + +enum { + IFLA_WWAN_UNSPEC, + IFLA_WWAN_LINK_ID, /* u32 */ + + __IFLA_WWAN_MAX +}; +#define IFLA_WWAN_MAX (__IFLA_WWAN_MAX - 1) + +#endif /* _UAPI_WWAN_H_ */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 6d2d34c9f3..a47a731e45 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -313,6 +313,7 @@ enum hl_device_status { * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency + * HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -331,6 +332,7 @@ enum hl_device_status { #define HL_INFO_TOTAL_ENERGY 15 #define HL_INFO_PLL_FREQUENCY 16 #define HL_INFO_POWER 17 +#define HL_INFO_OPEN_STATS 18 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -444,6 +446,16 @@ struct hl_pll_frequency_info { __u16 output[HL_PLL_NUM_OUTPUTS]; }; +/** + * struct hl_open_stats_info - device open statistics information + * @open_counter: ever-growing counter, increased on each successful dev open + * @last_open_period_ms: duration (ms) device was open last time + */ +struct hl_open_stats_info { + __u64 open_counter; + __u64 last_open_period_ms; +}; + /** * struct hl_power_info - power information * @power: power consumption @@ -664,6 +676,7 @@ struct hl_cs_chunk { #define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80 #define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100 #define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200 +#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400 #define HL_CS_STATUS_SUCCESS 0 diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index dc52e3cf57..b1de99bf56 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -49,7 +49,14 @@ #define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18 enum { - BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL + BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL, + BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL, +}; + +enum bnxt_re_wqe_mode { + BNXT_QPLIB_WQE_MODE_STATIC = 0x00, + BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01, + BNXT_QPLIB_WQE_MODE_INVALID = 0x02, }; struct bnxt_re_uctx_resp { @@ -62,6 +69,8 @@ struct bnxt_re_uctx_resp { __aligned_u64 comp_mask; __u32 chip_id0; __u32 chip_id1; + __u32 mode; + __u32 rsvd1; /* padding */ }; /* diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 22483799cd..3072e5d6b6 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -240,6 +240,7 @@ enum rdma_driver_id { RDMA_DRIVER_OCRDMA, RDMA_DRIVER_NES, RDMA_DRIVER_I40IW, + RDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW, RDMA_DRIVER_VMW_PVRDMA, RDMA_DRIVER_QEDR, RDMA_DRIVER_HNS, diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index 90c0cf2280..10b5f6a4c6 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ -143,7 +143,7 @@ struct ib_user_mad_hdr { */ struct ib_user_mad { struct ib_user_mad_hdr hdr; - __aligned_u64 data[0]; + __aligned_u64 data[]; }; /* diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h new file mode 100644 index 0000000000..26b638a7ad --- /dev/null +++ b/include/uapi/rdma/irdma-abi.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ +/* + * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + */ + +#ifndef IRDMA_ABI_H +#define IRDMA_ABI_H + +#include <linux/types.h> + +/* irdma must support legacy GEN_1 i40iw kernel + * and user-space whose last ABI ver is 5 + */ +#define IRDMA_ABI_VER 5 + +enum irdma_memreg_type { + IRDMA_MEMREG_TYPE_MEM = 0, + IRDMA_MEMREG_TYPE_QP = 1, + IRDMA_MEMREG_TYPE_CQ = 2, +}; + +struct irdma_alloc_ucontext_req { + __u32 rsvd32; + __u8 userspace_ver; + __u8 rsvd8[3]; +}; + +struct irdma_alloc_ucontext_resp { + __u32 max_pds; + __u32 max_qps; + __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmapped area */ + __u8 kernel_ver; + __u8 rsvd[3]; + __aligned_u64 feature_flags; + __aligned_u64 db_mmap_key; + __u32 max_hw_wq_frags; + __u32 max_hw_read_sges; + __u32 max_hw_inline; + __u32 max_hw_rq_quanta; + __u32 max_hw_wq_quanta; + __u32 min_hw_cq_size; + __u32 max_hw_cq_size; + __u16 max_hw_sq_chunk; + __u8 hw_rev; + __u8 rsvd2; +}; + +struct irdma_alloc_pd_resp { + __u32 pd_id; + __u8 rsvd[4]; +}; + +struct irdma_resize_cq_req { + __aligned_u64 user_cq_buffer; +}; + +struct irdma_create_cq_req { + __aligned_u64 user_cq_buf; + __aligned_u64 user_shadow_area; +}; + +struct irdma_create_qp_req { + __aligned_u64 user_wqe_bufs; + __aligned_u64 user_compl_ctx; +}; + +struct irdma_mem_reg_req { + __u16 reg_type; /* enum irdma_memreg_type */ + __u16 cq_pages; + __u16 rq_pages; + __u16 sq_pages; +}; + +struct irdma_modify_qp_req { + __u8 sq_flush; + __u8 rq_flush; + __u8 rsvd[6]; +}; + +struct irdma_create_cq_resp { + __u32 cq_id; + __u32 cq_size; +}; + +struct irdma_create_qp_resp { + __u32 qp_id; + __u32 actual_sq_size; + __u32 actual_rq_size; + __u32 irdma_drv_opt; + __u16 push_idx; + __u8 lsmm; + __u8 rsvd; + __u32 qp_caps; +}; + +struct irdma_modify_qp_resp { + __aligned_u64 push_wqe_mmap_key; + __aligned_u64 push_db_mmap_key; + __u16 push_offset; + __u8 push_valid; + __u8 rsvd[5]; +}; + +struct irdma_create_ah_resp { + __u32 ah_id; + __u8 rsvd[4]; +}; +#endif /* IRDMA_ABI_H */ diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 27905a0268..8597e6f22a 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -101,6 +101,8 @@ enum mlx5_ib_alloc_ucontext_resp_mask { MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0, MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1, MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2, + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3, + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4, }; enum mlx5_user_cmds_supp_uhw { @@ -270,6 +272,7 @@ struct mlx5_ib_query_device_resp { enum mlx5_ib_create_cq_flags { MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0, MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1, + MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS = 1 << 2, }; struct mlx5_ib_create_cq { diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index 068433e222..e283c2220a 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -99,7 +99,16 @@ struct rxe_send_wr { __u32 remote_qkey; __u16 pkey_index; } ud; + struct { + __aligned_u64 addr; + __aligned_u64 length; + __u32 mr_lkey; + __u32 mw_rkey; + __u32 rkey; + __u32 access; + } mw; /* reg is only used by the kernel and is not part of the uapi */ +#ifdef __KERNEL__ struct { union { struct ib_mr *mr; @@ -108,6 +117,7 @@ struct rxe_send_wr { __u32 key; __u32 access; } reg; +#endif
} wr; }; diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 535a7229e1..d17c061950 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -710,7 +710,7 @@ enum { * Raw MIDI section - /dev/snd/midi?? */ -#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1) +#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2) enum { SNDRV_RAWMIDI_STREAM_OUTPUT = 0, @@ -736,12 +736,38 @@ struct snd_rawmidi_info { unsigned char reserved[64]; /* reserved for future use */ }; +#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0 +#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0) +#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3 +#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3) + +#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16 + +struct snd_rawmidi_framing_tstamp { + /* For now, frame_type is always 0. Midi 2.0 is expected to add new + * types here. Applications are expected to skip unknown frame types. + */ + __u8 frame_type; + __u8 length; /* number of valid bytes in data field */ + __u8 reserved[2]; + __u32 tv_nsec; /* nanoseconds */ + __u64 tv_sec; /* seconds */ + __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH]; +} __packed; + struct snd_rawmidi_params { int stream; size_t buffer_size; /* queue size in bytes */ size_t avail_min; /* minimum avail bytes for wakeup */ unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */ - unsigned char reserved[16]; /* reserved for future use */ + unsigned int mode; /* For input data only, frame incoming data */ + unsigned char reserved[12]; /* reserved for future use */ }; #ifndef __KERNEL__ diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index d1b3889f74..c422a403c0 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include
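For the new snd_rawmidi_params "mode" field above, a minimal usage sketch (assuming an already-opened rawmidi device file descriptor "fd", the pre-existing SNDRV_RAWMIDI_IOCTL_PARAMS ioctl and SNDRV_RAWMIDI_STREAM_INPUT from the same stream enum; error handling omitted):

	struct snd_rawmidi_params params = { 0 };

	params.stream = SNDRV_RAWMIDI_STREAM_INPUT;
	params.buffer_size = 4096;		/* queue size in bytes */
	params.avail_min = 1;			/* wake up on any data */
	params.mode = SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP |
		      SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC;
	ioctl(fd, SNDRV_RAWMIDI_IOCTL_PARAMS, &params);

Input data would then be delivered as struct snd_rawmidi_framing_tstamp frames, each carrying tv_sec/tv_nsec and up to SNDRV_RAWMIDI_FRAMING_DATA_LENGTH payload bytes.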